Current File: /var/log/grafana-agent.log
ts=2022-07-25T18:42:55Z level=info caller=traces/traces.go:135 msg="Traces Logger Initialized" component=traces
ts=2022-07-28T18:43:40Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-07-28T18:50:22.130151645Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T18:56:11.377900007Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T19:02:52.28601536Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T19:09:18.243130413Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-07-28T19:09:18Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-07-28T19:09:53.196392679Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T19:16:47.629532759Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T19:16:47.924428709Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T19:22:07.742012392Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T19:28:48.744757785Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T19:36:10.143465559Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T19:41:45.599407236Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T19:48:11.361136554Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T19:54:12.47769655Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T20:00:57.025770638Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T20:07:26.000922487Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T20:13:15.557145485Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T20:19:53.480130955Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T20:26:17.971274922Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T20:31:46.988696419Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T20:37:34.309641613Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T20:44:41.736869527Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T20:50:57.572884574Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T20:56:48.459660361Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T21:03:07.183009269Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T21:10:04.635634129Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T21:15:34.363283374Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T21:22:14.170993548Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T21:27:22.445893572Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T21:35:04.098764834Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T21:40:57.239933954Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T21:48:05.484940806Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T21:54:35.593749411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T22:00:41.580987946Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T22:06:04.568553098Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T22:12:00.202502761Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T22:19:16.717257006Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T22:24:54.558617139Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T22:31:57.928742388Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T22:37:52.878103331Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T22:44:04.456376573Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T22:50:09.46398119Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T22:57:12.494905939Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T23:04:24.603034868Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T23:10:51.106171182Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T23:18:08.547340823Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T23:25:29.862812911Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T23:30:41.179297428Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-28T23:37:11.015727147Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-28T23:43:23.806270331Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T23:49:07.308336187Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-28T23:56:03.754419171Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:02:13.858489165Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T00:09:09.73175793Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:15:50.60931781Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:21:59.236081467Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T00:27:16.987165654Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:34:13.1933447Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T00:39:50.692168987Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T00:45:10.301138567Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:51:13.980229976Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T00:57:17.597859288Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T01:04:03.618688962Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T01:10:48.269165876Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T01:16:06.024658387Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T01:23:15.832949082Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T01:29:50.8038804Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T01:36:08.639369716Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T01:42:05.532740218Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T01:48:28.238830894Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T01:54:42.220470737Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T02:00:21.19195482Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T02:06:52.06002416Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T02:14:21.169433677Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T02:20:36.970878823Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T02:27:44.158053582Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T02:33:53.064590637Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T02:39:24.109562486Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T02:44:56.34006928Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T02:51:18.483116632Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T02:57:17.464628901Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T03:03:35.888210167Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T03:10:48.696147904Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T03:16:15.869521957Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T03:22:55.450818789Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T03:28:26.202437203Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T03:34:17.988150443Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T03:40:50.38785347Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T03:47:45.31711181Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T03:54:46.760803835Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T04:00:31.341541171Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T04:07:32.611244757Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T04:13:43.82927118Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T04:19:36.914251154Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T04:26:22.894072423Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T04:32:48.80295099Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T04:38:59.294618014Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T04:45:37.556382813Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T04:51:20.970171407Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T04:57:21.509584835Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T05:04:45.117451338Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T05:10:45.69878319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T05:18:08.350173746Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T05:23:58.535306939Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T05:29:50.532406519Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T05:35:59.195462201Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T05:41:54.676048568Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T05:47:20.879569418Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T05:52:37.183454087Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T05:58:12.676987151Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T06:03:29.491644142Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T06:10:12.17754081Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T06:17:21.782567916Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T06:22:23.874595119Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T06:29:10.259627649Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T06:36:38.750476234Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T06:43:41.381935131Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T06:50:52.368129369Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T06:58:35.301277173Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T07:05:16.892465121Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T07:10:24.937942742Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T07:16:18.79544197Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T07:22:41.535877253Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T07:27:31.858460112Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T07:34:20.877111748Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T07:40:53.940758851Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T07:48:10.287745718Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T07:54:59.975510967Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T08:01:28.51565936Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T08:08:13.432892997Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T08:13:58.728199701Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T08:20:15.225177826Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T08:26:15.442345415Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T08:32:58.204238638Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T08:40:36.813696314Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T08:47:09.86686229Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T08:53:35.579106544Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T08:59:20.842608834Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T09:05:10.865467484Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T09:10:32.260773686Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T09:16:05.567318878Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T09:21:37.610611599Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T09:28:08.295196799Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T09:33:24.006884373Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T09:39:45.67245619Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T09:45:54.452554066Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T09:51:12.678687361Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T09:56:43.514943735Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T10:02:22.629759869Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T10:09:05.962425951Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T10:15:24.019253595Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T10:22:29.864413318Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T10:27:56.970904418Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T10:32:51.317201523Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T10:39:44.922127075Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T10:46:43.166460682Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T10:53:51.735199305Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T11:00:48.423248655Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T11:06:57.523019664Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T11:13:12.745141393Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T11:19:34.959673278Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T11:24:51.86785136Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T11:32:10.343145843Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T11:38:45.166565525Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T11:45:54.510641312Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T11:51:25.825688723Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T11:57:57.970404126Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T12:05:57.124222154Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T12:13:18.650597153Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T12:18:50.229303664Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T12:25:48.500731628Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T12:31:23.257889726Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T12:38:23.429919467Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T12:43:52.61061291Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T12:51:29.101077269Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T12:57:28.796216291Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T13:04:29.717746132Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T13:09:31.328950407Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T13:16:48.730389918Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T13:24:09.9626914Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T13:30:43.308053067Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T13:37:37.171516536Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T13:45:17.942164023Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T13:52:07.069474226Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T13:58:32.191112931Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T14:04:37.245188799Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T14:10:11.806341473Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T14:15:25.530483977Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T14:22:08.125130346Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T14:27:06.1451019Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T14:31:52.287995778Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T14:37:36.317477088Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T14:44:06.633581231Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T14:49:54.643738242Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T14:56:59.231007432Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T15:03:43.639897411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T15:08:45.268260242Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T15:15:56.182436003Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T15:22:58.209656583Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T15:28:06.021618031Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T15:33:39.64285641Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T15:40:47.811871103Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T15:47:48.4754023Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T15:52:52.062971927Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T15:58:46.636267117Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T16:05:53.406616967Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T16:11:07.495834064Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T16:16:56.2392432Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T16:21:47.931023665Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T16:27:45.417225612Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T16:35:21.084567357Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T16:41:19.508721168Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T16:48:43.924537964Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T16:55:19.173790856Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T17:02:07.850237458Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T17:08:34.080593633Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T17:14:34.897292983Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T17:19:56.585591826Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T17:25:52.479103576Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T17:33:01.023556973Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T17:39:06.406013106Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T17:44:50.428553849Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T17:51:03.208020118Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T17:57:42.646794159Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T18:03:52.736816549Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T18:09:23.014012156Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T18:15:58.38058356Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T18:22:49.480866857Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T18:29:25.474289363Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T18:37:05.980039119Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T18:44:50.750658347Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T18:51:18.246664443Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T18:58:36.919616801Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T19:04:51.0394945Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T19:10:00.688002805Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T19:16:05.660044419Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T19:23:05.832615083Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T19:30:03.829728787Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T19:35:57.24799421Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T19:43:03.662087527Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T19:49:21.845430681Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T19:56:12.266186761Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T20:02:10.177380307Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T20:08:45.652982929Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T20:14:56.437468063Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T20:22:41.37433496Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T20:28:50.066150325Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T20:33:45.265283123Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T20:41:05.699067715Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T20:47:48.008634418Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T20:54:06.789943819Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T20:59:44.389975192Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T21:05:51.676681931Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T21:13:16.389353012Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T21:20:13.519484575Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T21:27:27.539300383Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T21:34:31.642671685Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T21:41:04.961999347Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T21:46:56.773637045Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T21:52:43.154984207Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T21:59:35.395934752Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:07:12.396005928Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T22:13:18.860452196Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:19:09.250465379Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:24:42.959627165Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T22:30:24.700066248Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:35:43.347157133Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:41:30.437108139Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T22:48:25.032756217Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T22:54:01.217568524Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T22:59:35.809454927Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T23:06:59.866427377Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T23:12:14.007794369Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T23:18:54.683454474Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-29T23:26:20.277989909Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T23:32:38.441474431Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T23:38:20.256374582Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T23:45:12.874112934Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-29T23:52:21.705139787Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-29T23:59:53.815362642Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T00:05:52.717346799Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T00:11:01.060316992Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T00:18:24.522351699Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T00:24:36.82206696Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T00:31:24.561368011Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T00:38:32.947745931Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T00:43:44.44473805Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T00:51:05.825845862Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T00:58:00.218586962Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T01:04:30.976229159Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T01:11:13.421010831Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T01:17:04.557682895Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T01:23:08.018656998Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T01:30:38.287761626Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T01:37:31.514440982Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T01:43:45.13594085Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T01:50:03.771351224Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T01:56:55.266984205Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T02:03:59.709505704Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T02:10:17.563101022Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T02:16:23.1618075Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T02:22:26.425646066Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T02:28:43.129001689Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T02:34:44.876516657Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T02:42:12.741535319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T02:47:27.263271922Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T02:54:06.461099861Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T03:01:44.718943964Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T03:08:42.00337155Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T03:16:18.28474221Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T03:22:57.337927923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T03:29:49.170677462Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T03:35:57.459259736Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T03:43:15.115202594Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T03:49:54.64651734Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T03:56:30.536644867Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T04:03:44.58349104Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T04:09:54.479351703Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T04:16:44.420557014Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T04:23:51.251753049Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T04:30:28.532512542Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T04:37:23.5720914Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T04:44:23.931106892Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T04:51:22.925562998Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T04:58:53.928228743Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T05:03:54.597630363Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T05:10:19.960354418Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T05:17:37.16589468Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T05:23:36.59745678Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T05:28:52.074238267Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T05:36:05.092771264Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T05:42:39.333432425Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T05:49:30.722913866Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T05:55:03.215269828Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T06:02:06.000136006Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T06:08:13.563261198Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T06:15:56.190684767Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T06:22:49.249755235Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T06:29:32.154785205Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T06:35:32.481062951Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T06:41:58.271743797Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T06:48:34.194077291Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T06:55:04.844415931Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T07:01:32.14579758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T07:09:00.762471454Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T07:15:50.416458106Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T07:22:04.420963055Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T07:29:17.021701014Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T07:35:05.130024496Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T07:42:21.199110996Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T07:48:50.895909332Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T07:53:50.224905012Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T08:00:53.572650277Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T08:06:42.509567024Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T08:12:41.665974673Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T08:18:26.858874988Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T08:24:48.688008325Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T08:30:56.914426677Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T08:37:51.285651613Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T08:44:29.670665926Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T08:50:10.545867812Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T08:57:22.538280196Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T09:04:09.709560389Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T09:10:05.62893671Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T09:16:58.183968832Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T09:22:53.796961607Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T09:29:21.75591527Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T09:36:00.770678411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T09:43:00.645967836Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T09:50:16.896142189Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T09:56:18.135236071Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T10:04:35.460005635Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T10:11:08.511386983Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T10:16:29.431277395Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T10:23:36.402552925Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T10:30:11.634493499Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T10:37:27.383245691Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T10:43:12.770681182Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T10:49:04.361012154Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T10:56:42.597535203Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T11:02:56.239124498Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T11:10:14.919459303Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T11:15:42.392801609Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T11:21:47.885114219Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T11:27:48.279678201Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T11:34:36.585912164Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T11:41:13.848626691Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T11:48:37.669705852Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T11:54:24.589368487Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T12:00:52.876244638Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T12:06:37.846851781Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T12:12:52.82389191Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T12:18:29.190541737Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T12:25:28.90266503Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T12:30:55.392566647Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T12:36:40.726044729Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T12:42:46.748691309Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T12:47:35.220930713Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T12:53:42.144495911Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T13:00:29.788057052Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T13:06:18.517311089Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T13:12:36.00627838Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T13:19:42.330967604Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T13:26:11.755247042Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T13:33:43.610074378Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T13:39:36.7559247Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T13:46:42.728743592Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T13:53:20.994861116Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T14:00:07.869343923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T14:07:15.4116677Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T14:13:16.600754139Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T14:19:12.277561666Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T14:24:37.792443332Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T14:30:49.656944164Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T14:36:14.324047318Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T14:42:04.199653149Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T14:47:32.604014081Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T14:54:18.318306815Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T15:01:03.41325356Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T15:06:29.309228747Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:12:39.48633434Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:18:35.072815865Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T15:24:36.318896039Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:30:18.642430996Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:35:58.094262725Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T15:43:03.57395433Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:49:37.890856971Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T15:57:09.163185698Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T16:03:22.922185899Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T16:09:35.998117544Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T16:16:39.745782222Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T16:23:17.204834989Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T16:30:04.521152385Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T16:36:23.396981878Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T16:42:24.651887763Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T16:48:44.44179193Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T16:54:30.798110828Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T17:00:19.874775774Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T17:06:57.208584613Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T17:13:26.70421805Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T17:20:11.842358724Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T17:27:10.590386716Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T17:32:47.367205304Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T17:39:52.90585641Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T17:47:05.480662402Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T17:53:07.293814636Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T17:59:24.579135798Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T18:06:51.310563407Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T18:12:19.170860617Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T18:18:21.94642694Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T18:25:25.849189394Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T18:31:32.824541226Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T18:36:57.878087664Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T18:43:09.536768654Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T18:49:06.312897006Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T18:56:13.325458425Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T19:03:17.398007641Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T19:08:31.201913675Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T19:16:18.857760367Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T19:22:23.081118834Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T19:28:58.31852995Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T19:34:27.305379031Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T19:41:10.594593426Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T19:47:43.327890598Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T19:55:17.468124966Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T20:01:29.45711702Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T20:08:22.016822487Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T20:14:35.162934694Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T20:20:05.5254123Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T20:25:51.419945348Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T20:32:10.485955547Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T20:37:50.235894485Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T20:43:47.236763926Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T20:50:08.122541253Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T20:57:22.501126685Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T21:03:42.609595574Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T21:09:17.643918403Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T21:15:54.748457052Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T21:22:00.480225364Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T21:28:11.518962138Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T21:33:17.898334129Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T21:40:31.098159717Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T21:47:31.91743302Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T21:54:24.78028765Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T22:00:44.341813367Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T22:07:24.339486047Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T22:14:45.717199092Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T22:21:14.906351755Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T22:26:48.132728574Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T22:32:47.910673575Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T22:40:35.862322992Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T22:48:06.012131561Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T22:55:01.154229112Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T23:00:38.209289924Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T23:06:09.897467786Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T23:13:22.216222897Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T23:20:03.489318345Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T23:27:12.140508949Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T23:32:32.279197256Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T23:38:58.361837475Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-30T23:45:53.406096116Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-30T23:51:09.29908151Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-30T23:57:06.736764329Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T00:03:08.036256631Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T00:08:59.545483354Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T00:14:19.007082189Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T00:20:08.448194576Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T00:26:47.802909598Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T00:32:59.703689019Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T00:38:40.916150982Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T00:45:02.663615969Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T00:50:29.5168822Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T00:57:16.479552618Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T01:03:14.435924435Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T01:10:17.538225758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T01:17:13.25885382Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T01:23:12.2923122Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T01:30:19.3099451Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T01:35:53.761431224Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T01:42:19.983312548Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T01:47:58.782655656Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T01:53:05.412154044Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T01:58:59.244266818Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T02:06:07.828681124Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T02:11:54.394877487Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T02:19:13.388018698Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T02:24:51.232528281Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T02:31:29.481426071Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T02:37:27.050026606Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T02:44:58.71153765Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T02:52:16.427837735Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T03:00:06.149783381Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T03:05:56.218483976Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T03:12:53.491534712Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T03:20:00.839151049Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T03:27:07.224013384Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T03:32:35.688303807Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T03:38:58.357573297Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T03:44:21.832499532Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T03:50:58.355156953Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T03:56:44.882496218Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T04:02:45.470467298Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T04:08:21.151357745Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T04:14:45.393893162Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T04:21:49.125397395Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T04:28:26.185028465Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T04:35:43.593750479Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T04:42:04.876016634Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T04:48:04.186818095Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T04:54:51.052594038Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T05:01:50.509610785Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T05:09:26.937985918Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T05:16:12.015032977Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T05:22:27.195263919Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T05:29:06.93078905Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T05:35:30.995827776Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T05:42:48.810803068Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T05:49:49.358690058Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T05:56:41.39651822Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T06:02:01.488018297Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T06:08:49.990131367Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T06:16:13.69172667Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T06:22:00.639645767Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T06:28:10.808030132Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T06:34:05.118251163Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T06:41:01.575343864Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T06:47:33.290120129Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T06:52:51.477901684Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T07:00:09.730963261Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T07:05:23.80418518Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T07:12:17.353345921Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T07:17:19.315139837Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T07:23:21.68932758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T07:30:09.832582607Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T07:36:47.127589567Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T07:43:25.903359316Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T07:49:59.207574774Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T07:57:15.917965057Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T08:04:26.27578382Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T08:10:14.771877674Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T08:15:43.582045037Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T08:21:09.234668101Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T08:27:52.037897073Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T08:33:54.114782671Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T08:40:18.843294456Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T08:47:51.986049968Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T08:53:40.307820999Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T08:59:13.3260723Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T09:06:13.148320782Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T09:12:59.819686319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T09:18:04.791839026Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T09:25:39.678509083Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T09:32:24.849701291Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T09:38:42.053590072Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T09:45:45.488087888Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T09:52:05.104947633Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T09:59:25.419222824Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T10:06:06.083783543Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T10:13:03.799519528Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T10:19:23.672987394Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T10:27:03.356685206Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T10:32:55.74389447Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T10:38:55.500366095Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T10:44:42.809111165Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T10:52:28.296246116Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T10:58:12.814651858Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T11:05:15.501796758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T11:12:28.032376893Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T11:18:20.900728019Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T11:23:12.986284738Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T11:29:44.619520211Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T11:36:35.223399409Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T11:43:36.928317313Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T11:49:23.209601203Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T11:54:22.997617439Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T12:01:03.232465319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T12:08:24.865556411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T12:13:24.147424724Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T12:19:34.694854539Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T12:24:26.726668768Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T12:30:04.592882629Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T12:37:28.865953393Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T12:44:24.855816623Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T12:51:07.930976987Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T12:57:04.251344302Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T13:03:35.13928591Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T13:10:07.220197043Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T13:17:28.167606041Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T13:24:34.279363203Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T13:30:28.38164478Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T13:37:17.238701718Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T13:44:02.439620723Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T13:50:02.94284205Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T13:55:11.017967203Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T14:00:52.310734811Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T14:07:04.504923123Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T14:14:10.889816893Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T14:20:12.289903303Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T14:25:52.945893883Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T14:31:44.194857639Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T14:37:59.457734804Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T14:45:10.154363564Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T14:52:34.817450722Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T14:58:37.836435196Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T15:04:31.571159382Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T15:10:56.603798068Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T15:17:15.867262466Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T15:24:26.362453246Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T15:30:40.470864347Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T15:37:12.265209608Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T15:43:55.53896066Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T15:51:13.496021101Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T15:58:35.742466673Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:03:51.996243217Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T16:09:44.619751634Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:15:34.19991992Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:22:36.668623556Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T16:29:06.851897618Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:35:54.648460808Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:43:01.024460164Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T16:48:07.198864368Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T16:54:54.432363697Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T17:00:02.889552955Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T17:06:23.684761111Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T17:13:14.656275747Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T17:20:45.421473944Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T17:27:45.542557136Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T17:34:52.41832351Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T17:41:48.479030008Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T17:46:58.491764089Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T17:52:29.187476356Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T17:58:26.82903497Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T18:05:22.228681589Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T18:10:11.125301615Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T18:15:27.816417628Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T18:20:57.693579904Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T18:27:27.225415316Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T18:32:16.265491078Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T18:38:30.287969923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T18:43:38.140503996Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T18:50:30.51345374Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T18:57:23.13367904Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T19:02:48.708993399Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T19:09:50.882396054Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T19:14:55.592886442Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T19:20:47.147503408Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T19:27:40.048294177Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T19:34:29.267398486Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T19:41:34.736178084Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T19:47:53.511519616Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T19:53:35.85533353Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T20:00:58.448836962Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T20:07:51.59475254Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T20:13:32.659609295Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T20:20:23.269561716Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T20:26:11.499587082Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T20:32:18.225385951Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T20:39:35.582998489Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T20:45:04.044679936Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T20:51:56.427357949Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T20:58:04.655819953Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T21:05:03.61165811Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T21:11:50.234043814Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T21:17:35.530551558Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T21:24:41.592100317Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T21:30:25.511394359Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T21:37:23.746507338Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T21:43:34.311966379Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T21:49:37.555130512Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T21:56:21.562981598Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T22:02:24.410352042Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T22:08:28.146049949Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T22:14:57.466799239Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T22:21:52.209898064Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T22:28:13.751885827Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T22:33:34.904677807Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T22:39:40.250123426Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T22:45:22.479603161Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T22:52:03.83077023Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T22:59:05.261038549Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T23:06:03.44842651Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T23:12:30.500794841Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T23:18:15.989700768Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T23:24:04.353824372Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T23:30:34.218644212Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T23:37:50.559354578Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-07-31T23:45:15.08393361Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-07-31T23:52:20.007236622Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-07-31T23:59:19.80515395Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T00:05:38.921426293Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T00:12:37.822763682Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T00:19:24.551636792Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T00:25:31.013292464Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T00:30:30.994277161Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T00:37:13.394979072Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T00:44:41.298454204Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T00:50:48.217607503Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T00:57:46.728407396Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T01:03:55.197611488Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T01:10:45.07323098Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T01:15:43.292242462Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T01:20:56.149330068Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T01:28:34.773809685Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T01:34:49.235568319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T01:40:38.991359932Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T01:47:50.973817799Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T01:55:05.98158277Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T02:01:26.918133712Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:07:43.080705389Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T02:13:23.169129107Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T02:19:31.597833346Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:26:04.150778508Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:32:08.344594481Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T02:38:53.537473023Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:46:01.424449264Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:52:19.829505809Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T02:58:44.744744617Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T03:04:17.901990311Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T03:10:11.67601183Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T03:16:16.580191024Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T03:23:30.367612659Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T03:30:27.45011041Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T03:37:28.660024895Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T03:44:17.769841849Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T03:49:12.854834008Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T03:56:06.294159654Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T04:01:50.079051406Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T04:08:11.873171607Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T04:13:38.321281423Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T04:19:48.711871704Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T04:27:53.079132107Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T04:33:23.69504082Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T04:40:33.242236937Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T04:45:44.704087355Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T04:52:40.674479532Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T05:00:04.216953411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T05:05:53.726937369Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T05:11:28.214496316Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T05:17:24.453467018Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T05:24:34.131948588Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T05:29:45.91249188Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T05:35:12.588512703Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T05:40:23.837984721Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T05:45:26.617405119Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T05:51:27.609675631Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T05:57:04.534554219Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T06:03:35.25407859Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T06:09:23.472944373Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T06:15:10.361637144Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T06:21:38.65428483Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T06:28:10.791440731Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T06:34:40.370966892Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T06:40:45.025013727Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T06:46:52.585146187Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T06:53:28.230392288Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T07:00:13.772400998Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T07:05:32.483278295Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T07:12:08.570862982Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T07:18:21.276054041Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T07:23:56.655645295Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T07:31:09.18906273Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T07:37:10.352896386Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T07:42:28.763245855Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T07:48:31.496561741Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T07:54:43.206683849Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T08:00:10.29474001Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T08:05:35.336176732Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T08:10:53.213232687Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T08:16:13.057954829Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T08:23:20.137376695Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T08:28:58.797324927Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T08:35:28.669890352Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T08:41:36.296970381Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T08:48:50.842230639Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T08:54:43.40901593Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:00:32.17411454Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:06:24.537319036Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T09:12:32.96840793Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:18:39.401676958Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T09:24:03.27304971Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T09:31:11.638658138Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:38:51.13212619Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:45:04.333923731Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T09:51:15.383412023Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T09:58:39.057934699Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T10:05:05.853222177Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T10:12:28.742839216Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T10:18:03.57487847Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T10:24:00.278458548Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T10:30:43.877190317Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T10:36:35.581037408Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T10:42:08.155530285Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T10:47:52.414353206Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T10:53:35.073052733Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T11:00:32.02896715Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T11:06:21.314990051Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T11:13:31.233087532Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T11:18:42.935767808Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T11:25:35.984336024Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T11:31:23.938280693Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T11:37:15.243820307Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T11:43:15.549013403Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T11:50:26.687314077Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T11:56:35.788342184Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T12:02:00.037464175Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T12:09:03.50964472Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T12:14:50.593297388Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T12:21:54.453256493Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T12:28:21.082340391Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T12:34:56.844244267Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T12:42:24.79335459Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T12:47:49.294718853Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T12:53:38.335182618Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T13:00:47.19697348Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T13:06:53.466338734Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T13:14:03.624594684Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T13:21:38.414128247Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T13:28:46.111961758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T13:35:38.455466718Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T13:41:24.723921137Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T13:48:34.211517935Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T13:55:32.04378819Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T14:01:00.165976745Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T14:08:03.548323095Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T14:13:28.583309664Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T14:20:41.437362484Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T14:28:12.323258859Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T14:34:38.665682533Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T14:40:50.837817507Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T14:48:41.644105938Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T14:55:15.705734892Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:00:30.065628176Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T15:06:01.365492772Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:11:31.752424539Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:17:19.892950472Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T15:23:15.976439749Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:28:42.752849837Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T15:34:35.795161012Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T15:41:23.537925105Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:48:29.717729291Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T15:55:16.699791849Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T16:02:11.695550875Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T16:08:47.661423348Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:15:31.524627392Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:22:13.49879136Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T16:28:08.450897411Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:35:06.844481652Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:40:35.945567228Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T16:47:20.523419739Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:54:52.33470212Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T16:59:41.006550904Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T17:06:34.01779915Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T17:12:50.185349779Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T17:17:48.341234085Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T17:24:06.903433565Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T17:29:29.649258137Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T17:36:04.437214589Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T17:43:36.704692512Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T17:49:11.337381986Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T17:56:04.082683462Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:01:49.548655553Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:08:55.453743876Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T18:14:46.557484768Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:20:12.865079307Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T18:26:21.253028648Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T18:32:09.61776159Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:39:17.642136672Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:46:34.840271777Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T18:53:03.533482862Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T19:00:05.448160133Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:05:42.293997958Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T19:12:40.864055785Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T19:19:33.697383592Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:25:54.767018966Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:31:24.149601731Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T19:38:48.295319599Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:45:32.155243917Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:52:03.751146649Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T19:57:27.06810882Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T20:04:25.335580787Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T20:10:37.729759495Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T20:17:48.787331356Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T20:23:24.639409008Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T20:28:53.827431132Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T20:35:12.725754222Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T20:40:16.164170923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T20:45:56.067961472Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T20:53:21.969878317Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T21:01:07.997886527Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T21:08:11.140551138Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T21:15:13.369472195Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T21:21:40.795904417Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T21:28:09.377000536Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T21:33:21.127700991Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T21:38:40.927441579Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T21:45:51.131215571Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T21:51:52.379277992Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T21:57:25.926057903Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T22:02:48.468104367Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T22:09:37.881230281Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T22:14:47.040232862Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T22:20:49.131774751Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T22:27:48.304456319Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T22:34:43.020610591Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T22:41:50.114512014Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T22:48:58.267156072Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T22:53:53.26864874Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T23:00:23.542604923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T23:07:03.314293603Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T23:12:34.085123022Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T23:19:27.539993981Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T23:25:43.362573594Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T23:32:11.935378997Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T23:37:14.532448239Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-01T23:43:07.266267818Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-01T23:48:20.393515209Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-01T23:54:44.001560124Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T00:01:41.213971753Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T00:06:56.229736464Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T00:12:25.511429606Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T00:18:07.994571922Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T00:23:53.975518056Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T00:31:14.743366251Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T00:37:07.919783691Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T00:44:11.222894933Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T00:49:14.164032175Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T00:55:23.623938965Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T01:00:56.457320731Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T01:06:37.599881565Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T01:14:08.39529778Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T01:19:24.14013747Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T01:24:55.438074041Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T01:31:14.213365924Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T01:37:39.71654075Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T01:44:37.063228327Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T01:50:36.154762683Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T01:56:13.165200989Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T02:03:07.692827156Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T02:09:50.214892219Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T02:15:08.57394689Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T02:21:09.504918214Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T02:27:16.096875888Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T02:33:43.403314839Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T02:40:33.750696047Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T02:45:55.992443361Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T02:51:04.300539575Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T02:57:48.580224814Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T03:04:48.809262976Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T03:12:03.720439864Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T03:17:10.311173605Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T03:23:49.125475915Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T03:28:39.98618407Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T03:34:55.441140463Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T03:41:28.294064882Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T03:47:17.889168496Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T03:54:41.484215878Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T04:02:06.392347446Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T04:09:00.944359335Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T04:16:44.111862089Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T04:23:08.421150127Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T04:29:12.342727367Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T04:35:07.729319988Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T04:42:34.77889519Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T04:48:21.172844147Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T04:54:03.157434076Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T04:59:42.344176638Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T05:05:29.292474774Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T05:12:01.068258705Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T05:17:28.025064245Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T05:22:35.11578082Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T05:29:10.405566608Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T05:34:37.903757009Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T05:40:53.568881286Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T05:46:22.054068141Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T05:53:37.504251389Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T06:01:20.043375559Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T06:08:15.213862651Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T06:15:29.580437963Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T06:21:38.445339271Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T06:28:24.743646676Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T06:35:20.615258334Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T06:41:37.906323741Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T06:48:07.541630278Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T06:54:15.273276132Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T07:01:27.7200898Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T07:07:39.363524209Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T07:13:32.968031639Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T07:19:50.163164875Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T07:25:53.840175535Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T07:32:03.262198685Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T07:39:03.69674625Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T07:44:44.114095216Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T07:51:44.125685713Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T07:58:11.096636387Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T08:05:19.180557839Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T08:11:57.327773792Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T08:18:08.136004678Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T08:24:03.751934089Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T08:30:37.049047415Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T08:37:42.17277057Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T08:44:59.76580182Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T08:52:38.202281421Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T09:00:17.906130324Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T09:06:50.577512796Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T09:14:45.739459539Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T09:21:54.902582003Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T09:27:06.719843197Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T09:34:45.510416998Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T09:40:23.284773728Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T09:47:00.086862812Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T09:52:25.382210326Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T09:58:51.095126099Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T10:04:37.095565768Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T10:10:14.955404115Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T10:16:09.415675079Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T10:23:00.531517379Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T10:29:13.383756323Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T10:34:21.84387763Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T10:40:10.531707373Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T10:45:38.942590068Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T10:50:51.693345861Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T10:58:44.382484239Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T11:06:08.93457575Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T11:13:17.98961616Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T11:20:14.357274545Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T11:26:04.509156776Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T11:34:06.662792852Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T11:41:34.280630446Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T11:47:10.178402827Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T11:53:11.662840477Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T11:59:03.884935469Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T12:04:34.790995274Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T12:10:16.826160137Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T12:16:14.896703907Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T12:22:18.57606797Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T12:29:15.243487923Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.114:53: no such host"
ts=2022-08-02T12:35:30.015969915Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.119:53: no such host"
ts=2022-08-02T12:41:12.947518059Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 192.185.0.112:53: no such host"
ts=2022-08-02T12:47:18.898025711Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T12:54:51.389266423Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T13:00:30.621537831Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T13:07:00.730320387Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T13:13:34.636640752Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T13:19:50.790832191Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T13:27:05.705542152Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T13:34:17.568615508Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T13:39:41.852202679Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T13:45:57.642501758Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T13:51:24.121130738Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T13:56:47.297556783Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:04:34.638072046Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:11:32.847284482Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T14:19:04.595052554Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:25:48.136221241Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:32:40.587297986Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T14:38:35.36721804Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:44:17.955157485Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:50:31.800173358Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T14:56:07.200803358Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T14:59:53.110396435Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-08-02T14:59:58.115884237Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="wal.Segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
[ OK ]
ts=2022-08-02T14:59:59Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-02T15:01:40.013381934Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T15:07:18.283606164Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T15:08:14.407671807Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T15:13:47.085877961Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T15:20:41.812881898Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T15:25:54.465462677Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T15:31:08.786851302Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T15:38:51.577052802Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T15:46:16.782248791Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T15:53:49.665425268Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:00:55.281116085Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:07:29.310815453Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:13:51.94841197Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T16:19:49.576775598Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:25:16.541964489Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:31:25.368108761Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T16:38:05.76629192Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T16:45:40.976798253Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T16:51:56.635983186Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T16:58:39.701341029Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T17:05:28.437063946Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T17:12:46.399991572Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T17:19:04.139411353Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T17:25:12.479245555Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T17:32:37.758951728Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T17:39:25.340996004Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T17:45:20.050675468Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T17:51:46.524660486Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T17:58:37.479669967Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:04:44.285381216Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:11:41.974495051Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T18:17:54.883567149Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:25:02.094699975Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:32:14.00061532Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T18:39:13.479307875Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:46:01.092786024Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T18:52:33.061142897Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T18:59:54.707872469Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:05:43.066111838Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:12:37.762734396Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:18:04.468268239Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T19:25:39.730695425Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:31:00.7423614Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:37:03.025784831Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T19:43:59.62016102Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:50:19.804467712Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T19:57:00.277636863Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T20:03:12.76200703Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T20:08:16.859777929Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T20:14:04.306351079Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.8.8:53: no such host"
ts=2022-08-02T20:19:58.819966741Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T20:25:29.671568961Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
ts=2022-08-02T20:33:12.038992613Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 8.8.4.4:53: no such host"
ts=2022-08-02T20:40:09.132515624Z caller=client.go:377 level=error component=logs logs_config=integrations component=client host=logs.grafanacloud.newfold.com msg="final error sending batch" status=-1 error="Post \"https://logs.grafanacloud.newfold.com/loki/api/v1/push\": dial tcp: lookup logs.grafanacloud.newfold.com on 1.1.1.1:53: no such host"
[ OK ]
ts=2022-08-02T20:43:16Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-02T20:43:16.179094709Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-02T20:43:16.179479581Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-02T20:43:21.18851805Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-02T20:43:21.188842172Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
[ OK ]
ts=2022-08-02T20:43:23Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-05T01:09:36.234575223Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 419 metadata"
ts=2022-08-05T01:09:36.234905891Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=500 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 500 samples and 0 metadata"
[ OK ]
ts=2022-08-05T18:42:48Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-05T18:42:48.227764767Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:42:48.227988853Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:42:53.238649926Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:42:53.238871888Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:42:58.247956761Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:42:58.248264423Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:03.339057823Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:03.339387919Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:08.354039734Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:08.354387956Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:13.362800782Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:13.363040699Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:18.36887613Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:18.369099737Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:23.378419858Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:23.378595362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:28.388246007Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:28.388511173Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:33.394973714Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:33.395261623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:38.403620154Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:38.403904446Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:43.409388906Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:43.409621198Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:48.415783779Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:48.415970681Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:53.433913464Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:53.434202725Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:43:58.44925203Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:43:58.449571876Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:03.4584318Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:03.458795867Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:08.465041822Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:08.465226038Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:13.471282361Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:13.471540205Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:18.484277155Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:18.484649646Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:23.49359501Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:23.4940898Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:28.499157783Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:28.499370654Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:33.509252799Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:33.509455923Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:38.515238474Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:38.515437056Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:43.520856384Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:43.521062614Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:48.530446829Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:48.530667279Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:53.537468443Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:53.537833122Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:44:58.544570329Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:44:58.544791101Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:03.551125756Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:03.55128145Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:08.558362373Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:08.558537756Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:13.567469633Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:13.567690445Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:18.57410426Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:18.574518248Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:23.579474465Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:23.579690054Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:28.591001874Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:28.591301261Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:33.612618007Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:33.612970202Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:38.619320965Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:38.61958626Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:43.628249905Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:43.628469408Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:48.633895133Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:48.634074752Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:53.643468325Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:53.643730624Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:45:58.650556217Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:45:58.650785205Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:03.658248755Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:03.65847022Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:08.665187203Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:08.665370331Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:13.672226241Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:13.672506797Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:18.684100629Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:18.684304461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:23.691790491Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:23.692015089Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:28.698343936Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:28.698539793Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:33.706436813Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:33.706618712Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:38.712424871Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:38.71266572Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:43.72099701Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:43.721191093Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:48.727222675Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:48.727404643Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:53.733112545Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:53.733299222Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:46:58.739251344Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:46:58.73944974Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:03.746687651Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:03.747100257Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:08.767363495Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:08.767699258Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:13.774108745Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:13.774272146Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:18.779843241Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:18.78004462Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:23.786184121Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:23.786428169Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:28.793820073Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:28.794215665Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:33.801090155Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:33.801269418Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:38.806884874Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:38.807069988Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:43.815358323Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:43.81560887Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:48.824338927Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:48.824630723Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:53.834336804Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:53.834799171Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:47:58.845926369Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:47:58.923821273Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:03.934097679Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:03.934367024Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:08.944003064Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:08.944204728Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:13.953082185Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:13.953257006Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:18.960090124Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:18.96030434Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:23.970335317Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:23.97055688Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:28.976558091Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:28.976791014Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:33.98658907Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:33.986822865Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:38.994531412Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:39.007366944Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:44.014253214Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:44.014497967Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:49.024567001Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:49.024781554Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:54.03026943Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:54.030463011Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:48:59.0372437Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:48:59.037446381Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:04.048515569Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:04.048841603Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:09.061029957Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:09.06141676Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:14.072930925Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:14.073271848Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:19.084083676Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:19.084452572Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:24.093106034Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:24.093276628Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:29.105553353Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:29.105865936Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:34.117233205Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:34.117443575Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:39.124646024Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:39.124998601Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:44.131217753Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:44.131366795Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:49.137731235Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:49.137922343Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:54.14437493Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:54.161017814Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:49:59.167890454Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:49:59.168067938Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:04.178140564Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:04.178430612Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:09.185340532Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:09.185566285Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:14.191809515Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:14.19200038Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:19.199140955Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:19.19942678Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:24.206122286Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:24.206299237Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:29.213700119Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:29.213917874Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:34.221033883Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:34.221269246Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:39.230022854Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:39.230274651Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:44.237278439Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:44.237447817Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:49.244339301Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:49.244527711Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:54.252175797Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:54.252427176Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:50:59.259123984Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:50:59.259288304Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:04.274067942Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:04.274372304Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:09.281091234Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:09.281312555Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:14.28856904Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:14.289057208Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:19.296120696Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:19.296279401Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:24.303463629Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:24.303674599Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:29.311454085Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:29.311726438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:34.322201593Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:34.322541665Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:39.328892096Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:39.329073109Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:44.340628696Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:44.34099226Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:49.349106675Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:49.349305718Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:54.365313231Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:54.365675245Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:51:59.373770125Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:51:59.373988685Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:04.38123374Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:04.381447112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:09.395939376Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:09.396316344Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:14.402728984Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:14.402911089Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:19.413277922Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:19.413594773Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:24.421099731Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:24.421255535Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:29.443818016Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:29.44403147Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:34.451476068Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:34.451664466Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:39.45870525Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:39.458929087Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:44.46951916Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:44.469776677Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:49.481376113Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:49.481797896Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:54.488839952Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:54.489010062Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:52:59.497193417Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:52:59.497384896Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:04.509066149Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:04.509430799Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:09.520211026Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:09.520504967Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:14.533201975Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:14.533386297Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:19.54588066Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:19.546258344Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:24.559143915Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:24.559650501Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:29.567809903Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:29.567990092Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:34.575869929Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:34.576067429Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:39.585566583Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:39.585771845Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:44.594316101Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:44.594499523Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:49.6041866Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:49.604531326Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:54.612951092Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:54.613298412Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:53:59.621863205Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:53:59.622174618Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:04.636072962Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:04.636609631Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:09.649133619Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:09.649481661Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:14.663139987Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:14.663465844Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:19.670830959Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:19.671029681Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:24.681120021Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:24.681323456Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:29.689119192Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:29.689533208Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:34.70319323Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:34.70358298Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T18:54:39.712278938Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T18:54:39.712459026Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
[... the two preceding error lines (caller=instance.go:307 msg="failed to initialize instance" and caller=manager.go:268 msg="instance stopped abnormally, restarting after backoff period") repeat as a pair roughly every 5s (backoff=5s) with the identical error "error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" from ts=2022-08-05T18:54:44Z through ts=2022-08-05T19:02:16Z; the individual entries are omitted here ...]
ts=2022-08-05T19:02:21.161542391Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:21.16489085Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:26.183178337Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:26.183663279Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:31.199942198Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:31.20045445Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:36.217472827Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:36.21836162Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:41.234916951Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:41.235384705Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:46.253364497Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:46.253821073Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:51.263625959Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:51.26393885Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:02:56.278626771Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:02:56.278967203Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:01.309347545Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:01.309806156Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:06.328681033Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:06.329026304Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:11.353099976Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:11.353375646Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:16.366080532Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:16.366306828Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:21.381861846Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:21.382202877Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:26.394849196Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:26.395122328Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:31.415870785Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:31.416265114Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:36.430733159Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:36.430963849Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:41.445779196Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:41.446040816Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:46.456929935Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:46.457195247Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:51.487090272Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:51.487305596Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:03:56.500887436Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:03:56.501101573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:01.523276955Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:01.523804111Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:06.536449227Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:06.536635681Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:11.555880258Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:11.556264774Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:16.574334065Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:16.574881052Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:21.58806848Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:21.58836455Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:26.605173087Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:26.605635544Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:31.614922039Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:31.615168914Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:36.632991641Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:36.65980599Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:41.678016855Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:41.678327625Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:46.691620177Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:46.69180837Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:51.701364258Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:51.70158878Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:04:56.713588139Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:04:56.713800837Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:01.737729851Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:01.738127483Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:06.749840017Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:06.750068532Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:11.763374905Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:11.763687257Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:16.776624019Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:16.776952355Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:21.788193907Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:21.788412344Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:26.803142664Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:26.803430173Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:31.813351118Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:31.813599961Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:36.845980074Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:36.846182362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:41.86332095Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:41.863650783Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:46.876757519Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:46.876971895Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:51.895875824Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:51.896100199Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:05:56.90881468Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:05:56.909033319Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:01.927616632Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:01.927916479Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:06.940230329Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:06.940480138Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:11.956266069Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:11.956542146Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:16.966770531Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:16.966971143Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:21.97874914Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:21.978992992Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:26.994919551Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:26.995160589Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:32.007400891Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:32.007574275Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:37.026593015Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:37.026972175Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:42.040506833Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:42.040744274Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:47.053755828Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:47.054053774Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:52.065620347Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:52.065922832Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:06:57.077966217Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:06:57.078196276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:02.090833876Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:02.091068942Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:07.106257535Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:07.106792803Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:12.126276934Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:12.126606255Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:17.139074119Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:17.13928111Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:22.153564123Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:22.154015744Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:27.167843593Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:27.168119806Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:32.179341012Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:32.179539793Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:37.191672152Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:37.191883704Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:42.204957693Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:42.205188573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:47.21774708Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:47.217969034Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:52.230418121Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:52.230631193Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:07:57.248000309Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:07:57.248363036Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:02.276461318Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:02.276822211Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:07.289226389Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:07.289468301Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:12.300874953Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:12.301083567Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:17.322395152Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:17.351827185Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:22.372543336Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:22.372887988Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:27.38520792Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:27.385538322Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:32.394962931Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:32.395184815Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:37.418728061Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:37.419156833Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:42.431405706Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:42.431634969Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:47.447290046Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:47.447594567Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:52.461106571Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:52.461281976Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:08:57.474689472Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:08:57.47492266Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:02.489775381Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:02.49006439Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:07.501914739Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:07.502172624Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:12.518215205Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:12.518861568Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:17.531283767Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:17.531546365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:22.544103649Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:22.544313289Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:27.555400595Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:27.555658756Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:32.568410293Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:32.568574002Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:37.586140623Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:37.58641798Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:42.599165262Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:42.599422192Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:47.613667137Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:47.613894718Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:52.627449104Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:52.627752625Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:09:57.64934401Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:09:57.649772657Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:02.666702265Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:02.667125848Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:07.685858992Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:07.686115314Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:12.701064125Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:12.701276219Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:17.724021784Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:17.724309188Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:22.73995635Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:22.740165943Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:27.75655434Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:27.756821408Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:32.771150753Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:32.771321687Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:37.785804981Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:37.786015611Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:42.810464795Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:42.810897422Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:47.826863871Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:47.827122242Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:52.848404051Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:52.848583034Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:10:57.862539949Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:10:57.862831121Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:02.87583158Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:02.876125647Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:07.894770429Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:07.894960632Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:12.915134951Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:12.915339979Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:17.928991392Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:17.929205673Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:22.943538815Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:22.94388309Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:27.958245901Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:27.958430904Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:32.973267592Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:33.035063893Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:38.055531112Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:38.056152002Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:43.066420764Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:43.066795097Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:48.082146602Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:48.082457203Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:53.11423863Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:53.114648739Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:11:58.131357728Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:11:58.131782737Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:03.14447759Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:03.144682095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:08.156785661Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:08.156971095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:13.167115543Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:13.167358755Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:18.177564347Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:18.17784067Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:23.19043101Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:23.190633103Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:28.202888442Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:28.203202623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:33.213804233Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:33.214022215Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:38.228998094Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:38.230014536Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:43.239744236Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:43.239951603Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:48.250082382Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:48.250327687Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:53.262312943Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:53.26255609Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:12:58.274230159Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:12:58.274534921Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:03.28442958Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:03.284655412Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:08.302999174Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:08.303294103Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:13.313247718Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:13.313515411Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:18.324959772Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:18.325177574Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:23.340346817Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:23.340567844Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:28.357553112Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:28.35821596Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:33.367940996Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:33.36815574Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:38.380577697Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:38.380839063Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:43.398345703Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:43.398633962Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:48.4172196Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:48.417733849Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:53.434653856Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:53.435125133Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:13:58.454767291Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:13:58.45510943Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:03.469649826Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:03.470069415Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:08.479637903Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:08.479830429Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:13.497957611Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:13.498207356Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:18.510869696Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:18.511091353Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:23.521862701Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:23.522069579Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:28.537054129Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:28.537357902Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:33.550960072Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:33.551267291Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:38.561776872Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:38.561998928Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:43.572142246Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:43.5724279Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:48.583462243Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:48.583826543Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:53.593670985Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:53.593922867Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:14:58.604150474Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:14:58.604323505Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:03.61981584Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:03.621021889Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:08.632594909Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:08.632929743Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:13.650525635Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:13.650937861Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:18.661494752Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:18.661728997Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:23.673494532Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:23.673832317Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:28.685775235Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:28.685990027Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:33.696521342Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:33.696730094Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:38.713041184Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:38.713386686Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:43.730570776Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:43.731052079Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:48.74044459Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:48.740657344Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:53.752162804Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:53.752386118Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:15:58.769058458Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:15:58.769423863Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:03.779267822Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:03.779460127Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:08.791262424Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:08.79151415Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:13.802930804Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:13.803182024Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:18.820467254Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:18.820668742Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:23.841848391Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:23.842275383Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:28.854707338Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:28.855011928Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:33.867700982Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:33.867960942Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:38.881303238Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:38.881490945Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:43.904909452Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:43.905294242Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:48.919917661Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:48.920160226Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:53.938485511Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:53.938959232Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:16:58.955256612Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:16:58.95546976Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:03.977409036Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:03.978287176Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:08.990894014Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:08.99112284Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:14.003363827Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:14.00361916Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:19.014464334Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:19.014634102Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:24.030526669Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:24.030777077Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:29.041152905Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:29.041341102Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:34.052448427Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:34.052660087Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:39.076531352Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:39.076766721Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:44.086853053Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:44.087090235Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:49.100182028Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:49.100533703Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:54.110373431Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:54.110551275Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:17:59.125225787Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:17:59.125617335Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:04.13570105Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:04.1359669Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:09.152873501Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:09.153088299Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:14.164672576Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:14.164923483Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:19.178898511Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:19.179204479Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:24.190726799Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:24.190949376Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:29.203583689Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:29.203813457Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:34.219934285Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:34.220133685Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:39.238636197Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:39.239065197Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:44.250681631Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:44.251008828Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:49.262337362Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:49.262523573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:54.273835822Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:54.274068126Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:18:59.291003964Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:18:59.291309963Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:04.302604654Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:04.302836175Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:09.313962124Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:09.314259119Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:14.326794784Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:14.327111624Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:19.338010781Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:19.338205091Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:24.349106262Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:24.349297102Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:29.360851018Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:29.361188269Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:34.384825647Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:34.385210005Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:39.396161352Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:39.396395673Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:44.414827949Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:44.415254616Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:49.429350918Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:49.429669418Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:54.440334629Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:54.440584037Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:19:59.451785741Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:19:59.45203947Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:04.462776963Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:04.463040711Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:09.477578812Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:09.477912231Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:14.49067393Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:14.490887299Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:19.502639542Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:19.503139882Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:24.51906321Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:24.519603145Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:29.535179069Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:29.535424345Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:34.558294306Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:34.558630181Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:39.570900504Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:39.571081636Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:44.588006887Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:44.588263517Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:49.606546072Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:49.60687306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:54.621910411Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:54.622133256Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:20:59.642653767Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:20:59.643047272Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:04.654635313Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:04.654881155Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:09.67664531Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:09.676987579Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:14.689104478Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:14.68930306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:19.702365501Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:19.702604088Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:24.720823193Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:24.721132516Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:29.738974098Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:29.739208752Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:34.752248852Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:34.752461505Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:39.790038536Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:39.790455959Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:44.804444252Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:44.804664829Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:49.816920725Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:49.817301702Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:54.830427861Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:54.830670576Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:21:59.850323648Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:21:59.850744942Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:22:04.870511256Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:22:04.870883997Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:22:09.889754046Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:22:09.890019578Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
[... the same error pair repeats every ~5s (backoff=5s) from 19:22:14 through 19:29:41 for instance=5d4cc2bfce37a12c9e0ae208e7fc37c2: caller=instance.go:307 msg="failed to initialize instance" followed by caller=manager.go:268 msg="instance stopped abnormally, restarting after backoff period", each with err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" ...]
ts=2022-08-05T19:29:46.468575333Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:29:46.468966511Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:29:51.484140189Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:29:51.48438122Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:29:56.498235342Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:29:56.498423965Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:01.522158817Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:01.522520338Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:06.553234942Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:06.553757388Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:11.567694277Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:11.567940433Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:16.590195821Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:16.590620258Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:21.607761781Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:21.608125226Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:26.622096627Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:26.622356367Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:31.639735462Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:31.639944002Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:36.66237809Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:36.662638866Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:41.676691524Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:41.676982504Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:46.700572505Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:46.700825495Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:51.715395756Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:51.715608754Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:30:56.730221734Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:30:56.730443996Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:01.753365757Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:01.753783232Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:06.776401254Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:06.776789442Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:11.789030359Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:11.789288149Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:16.804016675Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:16.804241357Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:21.822247261Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:21.822567095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:26.837479091Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:26.837681432Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:31.858633031Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:31.859073116Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:36.8726129Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:36.872861529Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:41.884702697Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:41.884899161Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:46.899394996Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:46.899628549Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:51.916849522Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:51.917070556Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:31:56.931072405Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:31:56.931264061Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:01.959137151Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:01.959556736Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:06.987311356Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:06.98773072Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:12.005643898Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:12.005868138Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:17.022011051Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:17.022240846Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:22.037080117Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:22.037232582Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:27.052294285Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:27.052474029Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:32.06783481Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:32.068012781Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:37.082538242Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:37.082761504Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:42.096602314Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:42.096797595Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:47.11122166Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:47.111399513Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:52.12694216Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:52.127178827Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:32:57.155047771Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:32:57.155396162Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:02.175499176Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:02.175886187Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:07.196075934Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:07.196603333Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:12.21598399Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:12.216493074Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:17.234199886Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:17.234703642Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:22.255728612Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:22.255984153Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:27.274535497Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:27.27486308Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:32.293113286Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:32.293383325Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:37.333636949Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:37.333897369Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:42.352268976Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:42.352452359Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:47.371929398Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:47.372173497Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:52.391377757Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:52.391611605Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:33:57.411239839Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:33:57.411464757Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:02.431288622Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:02.431504603Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:07.44819631Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:07.448403599Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:12.466389658Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:12.466591658Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:17.489744051Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:17.490116276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:22.50493711Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:22.505137507Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:27.523847879Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:27.524093974Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:32.549559224Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:32.549867098Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:37.566507084Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:37.566826576Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:42.585525684Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:42.585744314Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:47.599986608Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:47.600222077Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:52.639804987Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:52.650911017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:34:57.685341149Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:34:57.685771291Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:02.714383606Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:02.714775484Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:07.751131577Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:07.751632582Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:12.777915184Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:12.778299762Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:17.801431961Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:17.80166114Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:22.817741695Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:22.817959521Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:27.833506522Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:27.83375092Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:32.850420923Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:32.850647909Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:37.865572778Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:37.865771413Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:42.883448723Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:42.883648158Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:47.900983166Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:47.901184079Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:52.928504165Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:52.928893509Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:35:57.944927655Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:35:57.945130004Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:02.966856041Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:02.967210667Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:07.983454105Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:07.983676317Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:13.016221022Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:13.016512692Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:18.048764798Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:18.04911172Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:23.071487889Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:23.071894149Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:28.085427567Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:28.085657382Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:33.099171972Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:33.099451391Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:38.120511064Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:38.120679226Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:43.133094083Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:43.13329284Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:48.146493102Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:48.146744197Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:53.159057155Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:53.159281763Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:36:58.177589844Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:36:58.177882535Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:03.190057273Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:03.190272925Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:08.202937674Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:08.203160852Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:13.218813562Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:13.219028818Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:18.232848015Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:18.233082229Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:23.246381237Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:23.246758229Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:28.259627176Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:28.259895934Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:33.272380775Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:33.27259203Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:38.287620008Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:38.287984703Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:43.303084435Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:43.30335005Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:48.31610278Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:48.316325456Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:53.330302039Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:53.330495234Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:37:58.343642304Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:37:58.343910795Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:03.370016069Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:03.370354364Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:08.391088417Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:08.391462004Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:13.40936492Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:13.409617188Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:18.422701792Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:18.422975905Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:23.436072913Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:23.436308254Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:28.452357763Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:28.452634695Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:33.473798964Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:33.47410104Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:38.487152732Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:38.487369223Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:43.503393479Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:43.503636888Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:48.517117434Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:48.517358975Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:53.530996012Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:53.531225946Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:38:58.555465516Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:38:58.555808097Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:03.573514535Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:03.573766461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:08.587406255Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:08.587607763Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:13.611035072Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:13.611478145Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:18.625158798Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:18.625365027Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:23.639916469Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:23.6402911Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:28.655734115Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:28.656030423Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:33.668519138Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:33.668781111Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:38.684413746Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:38.684612875Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:43.704916612Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:43.705104362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:48.727262893Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:48.72761935Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:53.74146781Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:53.741703904Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:39:58.757120961Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:39:58.757332561Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:03.776684058Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:03.776942233Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:08.798969306Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:08.799274955Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:13.813955594Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:13.814288468Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:18.827995071Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:18.828219706Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:23.841370861Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:23.841570062Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:28.855198985Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:28.855419592Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:33.871591269Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:33.871854438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:38.886285088Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:38.886502994Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:43.901324695Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:43.901526598Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:48.922839491Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:48.923284142Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:53.938126821Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:53.938368541Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:40:58.951919887Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:40:58.95212575Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:03.966060306Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:03.966300127Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:08.978904682Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:08.979117951Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:14.005077755Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:14.005426282Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:19.019464251Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:19.019687198Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:24.033033598Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:24.0756719Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:29.091504121Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:29.09174578Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:34.105825942Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:34.106038Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:39.119270544Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:39.119488581Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:44.133890879Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:44.134157023Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:49.147868871Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:49.148030718Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:54.171656702Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:54.172093012Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:41:59.186070672Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:41:59.186293004Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:04.199856379Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:04.200104387Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:09.213738912Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:09.214030069Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:14.23613607Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:14.236549791Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:19.25897383Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:19.259291649Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:24.276826165Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:24.277296321Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:29.291123969Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:29.291378501Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:34.315149315Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:34.315593249Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:39.337032838Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:39.337231168Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:44.357472398Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:44.357690653Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:49.376661777Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:49.376992758Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:54.41498303Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:54.415342626Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:42:59.428330709Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:42:59.428506593Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:04.445968199Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:04.446181058Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:09.462379108Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:09.462596899Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:14.475846119Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:14.476066436Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:19.499370788Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:19.499792172Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:24.516508395Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:24.516839968Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:29.538536254Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:29.538903449Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:34.555697844Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:34.556022557Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:39.569520727Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:39.569748114Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:44.585012997Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:44.585212264Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:49.599336014Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:49.599520787Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:54.61268543Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:54.612910302Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:43:59.626942056Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:43:59.627180354Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:04.644972995Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:04.645458816Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:09.660546861Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:09.660792235Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:14.675442346Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:14.675634705Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:19.690042939Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:19.69021401Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:24.70858958Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:24.708961601Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:29.752513944Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:29.752745779Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:34.77567918Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:34.775918552Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:39.792964329Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:39.793213766Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:44.813891061Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:44.814298824Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:49.833464146Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:49.83373868Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:54.851624412Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:54.851824441Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:44:59.870427487Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:44:59.870691019Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:04.889275799Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:04.889535273Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:09.90989776Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:09.910174564Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:14.927979669Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:14.92824533Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:19.946582395Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:19.946774886Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:24.965592198Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:24.965820443Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:29.98576905Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:29.986068132Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:35.003524175Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:35.003771775Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:40.023699869Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:40.024042511Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:45.041257955Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:45.041633923Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:50.059306061Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:50.059660956Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:45:55.078096173Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:45:55.078316879Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:00.095444281Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:00.095688334Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:05.114590797Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:05.114923564Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:10.132938485Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:10.133178819Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:15.153661653Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:15.154081217Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:20.175978633Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:20.176201017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:25.192685551Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:25.192922799Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:30.217913528Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:30.218269667Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:35.234387135Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:35.234651772Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:40.24908195Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:40.249307276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:45.26425488Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:45.264467278Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:50.28380966Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:50.284086782Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:46:55.302314734Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:46:55.302611459Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:00.316772483Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:00.31695812Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:05.33085445Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:05.331034945Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:10.346252484Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:10.346436933Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:15.36266695Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:15.363181024Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:20.378595737Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:20.378825984Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:25.398953185Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:25.399321625Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:30.416057017Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:30.416252024Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:35.433915794Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:35.434212951Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:40.450407976Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:40.450615032Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:45.466414056Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:45.466638194Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:50.485268017Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:50.485638438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:47:55.504215769Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:47:55.504637405Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:00.52860044Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:00.528970332Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:05.54444298Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:05.544675543Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:10.561060011Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:10.561276138Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:15.577659133Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:15.624027112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:20.638363618Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:20.638583652Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:25.657519319Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:25.657957659Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:30.675596227Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:30.67585453Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:35.700307708Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:35.731262467Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:40.748318634Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:40.748546275Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:45.765572595Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:45.765791304Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:50.783517494Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:50.78383692Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:48:55.800619476Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:48:55.800829073Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:49:00.818179241Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:49:00.818399802Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:49:05.835473518Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:49:05.835746685Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:49:10.852259927Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:49:10.852569205Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
ts=2022-08-05T19:49:15.868828996Z caller=instance.go:307 level=error agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory"
ts=2022-08-05T19:49:15.869038993Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal/00000000: no such file or directory" backoff=5s instance=5d4cc2bfce37a12c9e0ae208e7fc37c2
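The run of "failed to initialize instance ... error creating WAL: open WAL segment 0 ... no such file or directory" entries above repeats on a 5s backoff because the agent keeps its WAL under /tmp and that directory has disappeared out from under it (tmp cleanup or a reboot are the usual causes), so every restart attempt fails at the same point. One possible remediation, sketched here under the assumption that this host runs the agent in static mode with a YAML config file (the path /etc/grafana-agent.yaml and the directory /var/lib/grafana-agent/wal below are assumed examples, not values taken from this log), is to point wal_directory at persistent storage:

    # /etc/grafana-agent.yaml (assumed path) - move the metrics WAL off /tmp
    metrics:
      # any persistent directory the agent's user can write to; this path is an assumed example
      wal_directory: /var/lib/grafana-agent/wal
      # the existing global/configs/integrations sections stay as they are

On the next restart the instance should be able to recreate segment 00000000; older agent releases name this top-level block prometheus: rather than metrics:, so the key placement may differ by version.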
[ OK ]
ts=2022-08-05T19:49:20Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-05T19:59:21.35095312Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-05T19:59:21.73741394Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 419 metadata"
ts=2022-08-05T20:01:43.886975152Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-05T20:01:43.933636058Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 419 metadata"
ts=2022-08-05T20:03:20.374230625Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-05T20:03:48.650939207Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 419 metadata"
ts=2022-08-05T20:04:53.588918388Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 419 metadata"
ts=2022-08-05T20:05:20.195590417Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
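The HTTP 429 responses above are the remote endpoint enforcing the tenant's ingestion rate limit (125000); in this window only metadata batches (count=500 and count=419) are being rejected, not samples. Besides asking the provider to raise the limit or trimming the number of scraped series, one lever is to push metadata less often. A minimal sketch, assuming the remote_write entry in the agent config accepts the standard Prometheus metadata_config keys and that 5m is merely an illustrative interval:

    remote_write:
      - url: https://metrics.grafanacloud.newfold.com/api/prom/push
        metadata_config:
          send: true
          # send metadata batches less frequently than the default to stay under the tenant limit
          send_interval: 5m
          max_samples_per_send: 500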
[ OK ]
ts=2022-08-05T20:29:01Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-05T20:30:01.302157682Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=17 exemplarCount=0 err="context canceled"
ts=2022-08-05T20:30:01.302347912Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=2999
ts=2022-08-05T20:31:01.303377916Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-05T20:31:01.30356202Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="context canceled"
ts=2022-08-05T20:31:01.303872661Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="context canceled"
[ OK ]
ts=2022-08-05T20:52:09Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-05T20:53:09.078404401Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=23 exemplarCount=0 err="context canceled"
ts=2022-08-05T20:53:09.07867983Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=2999
ts=2022-08-05T20:54:09.079992784Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-05T20:54:09.080203501Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="context canceled"
ts=2022-08-05T20:54:09.080369092Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=171 err="context canceled"
[ OK ]
ts=2022-08-05T22:39:41.453499485Z caller=main.go:57 level=error msg="error creating the agent server entrypoint" err="creating gRPC listener: listen tcp 127.0.0.1:12346: bind: address already in use"
ts=2022-08-05T22:40:13.030222131Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-08-05T22:40:18.031988587Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="wal.Segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-08-05T22:41:10.239634387Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=31 exemplarCount=0 err="context canceled"
ts=2022-08-05T22:41:10.241084573Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=500 exemplarCount=0 err="context canceled"
ts=2022-08-05T22:41:10.241566866Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=1999
ts=2022-08-05T22:42:10.24189774Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-05T22:42:10.242233253Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="context canceled"
ts=2022-08-05T22:42:10.2426071Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=419 err="context canceled"
[ OK ]
ts=2022-08-06T00:42:51Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-06T16:31:32Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-06T16:31:34.479474795Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-08-06T16:31:39.481851546Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="wal.Segments: open /tmp/grafana-agent-wal/5d4cc2bfce37a12c9e0ae208e7fc37c2/wal: no such file or directory"
ts=2022-08-06T16:32:32.0720326Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=5 exemplarCount=0 err="context canceled"
ts=2022-08-06T16:32:32.072278155Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=2999
ts=2022-08-06T16:33:32.072463567Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-06T16:33:32.072670373Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="context canceled"
ts=2022-08-06T16:33:32.07285937Z caller=dedupe.go:112 agent=prometheus instance=5d4cc2bfce37a12c9e0ae208e7fc37c2 component=remote level=error remote_name=5d4cc2-3f7364 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=173 err="context canceled"
ts=2022-08-06T16:43:46.870338505Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-06T16:43:50.344801835Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-06T16:44:52.374674858Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-06T16:44:52.37492413Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=999
ts=2022-08-06T16:47:21.310742477Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-06T16:38:01.703Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"11\", host=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"system\", technical_service=\"Bluerock\"}"
ts=2022-08-06T16:47:23.162313377Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-06T16:52:02.145866073Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-06T16:38:01.703Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"12\", host=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"user\", technical_service=\"Bluerock\"}"
ts=2022-08-06T17:02:58.741302205Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-06T16:44:01.703Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"4\", host=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"steal\", technical_service=\"Bluerock\"}"
[ OK ]
ts=2022-08-06T17:03:30Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-06T17:03:38.355006762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-06T17:04:30.227979054Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-06T17:04:30.228149263Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=992 exemplarCount=0 err="context canceled"
ts=2022-08-06T17:04:30.228201705Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=3991
ts=2022-08-06T17:05:40.398431987Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=982 exemplarCount=0 err="context canceled"
ts=2022-08-06T17:17:03.27232013Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-06T17:13:01.706Z, series={__name__=\"node_cooling_device_max_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"13\", technical_service=\"Bluerock\", type=\"Processor\"}"
ts=2022-08-06T17:17:21.877880035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-06T17:11:01.703Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"10\", technical_service=\"Bluerock\", type=\"Processor\"}"
ts=2022-08-06T17:17:58.92323756Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=458 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (90909.09090909091) exceeded while adding 458 samples and 0 metadata"
ts=2022-08-06T17:18:05.451154165Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 932 samples and 0 metadata"
[ OK ]
ts=2022-08-08T18:43:03Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-08T18:43:03.904094832Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:03.904327248Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:08.919343733Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:09.148772895Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:14.156478016Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:14.156736851Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:19.179783448Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:19.180049431Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:24.190761557Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:24.191102896Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:29.201220343Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:29.201376925Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:34.207630253Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:34.208025558Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:39.213864314Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:39.214045533Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:44.242529137Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:44.242874988Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:49.252414113Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:49.252755917Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:54.261539665Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:54.261922555Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:43:59.268362704Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:43:59.268582677Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:04.307490552Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:04.307868747Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:09.315011354Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:09.315244135Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:14.322222687Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:14.32249609Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:19.330119002Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:19.330332361Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:24.335480014Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:24.335736828Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:29.342506128Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:29.342691903Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:34.352703451Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:34.352924264Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:39.360537128Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:39.360788056Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:44.367103296Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:44.367371248Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:49.374185426Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:49.374425804Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:54.382007449Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:54.382191149Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:44:59.391010388Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:44:59.39126508Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:04.397855626Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:04.398181366Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:09.403739906Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:09.403918702Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:14.409623854Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:14.409826576Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:19.4216422Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:19.421989431Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:24.430119918Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:24.430331576Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:29.435655355Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:29.435855268Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:34.442573512Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:34.442855451Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:39.455184236Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:39.455383032Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:44.461987477Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:44.462183862Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:49.473505206Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:49.473846532Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:54.483251794Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:54.483406276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:45:59.491106864Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:45:59.491313065Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:04.503810885Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:04.504122887Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:09.510146245Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:09.51037644Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:14.516385355Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:14.51659986Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:19.522448946Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:19.522648534Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:24.532759581Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:24.563261942Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:29.570359639Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:29.570706475Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:34.576374982Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:34.576565741Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:39.58302195Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:39.583296902Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:44.592592585Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:44.592865784Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:49.599116708Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:49.599300055Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:54.604892187Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:54.605099767Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:46:59.611621368Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:46:59.61180429Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:04.618626461Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:04.618810622Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:09.627274429Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:09.627435979Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:14.633236594Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:14.633426549Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:19.638989969Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:19.639196125Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:24.657547648Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:24.65789921Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:29.663578771Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:29.663795083Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:34.672429643Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:34.672616973Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:39.678937968Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:39.679140959Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:44.685008075Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:44.685194809Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:49.693830523Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:49.694240282Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:54.703216644Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:54.703470017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:47:59.719479835Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:47:59.719856411Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:04.7307252Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:04.731020614Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:09.74557832Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:09.745850195Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:14.754266758Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:14.754495544Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:19.763803628Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:19.764100106Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:24.778753357Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:24.778953185Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:29.785235626Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:29.785450979Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:34.793579913Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:34.794051276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:39.800441553Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:39.800619869Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:44.808292069Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:44.808510731Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:49.832197883Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:49.832420421Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:54.840732235Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:54.840964809Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:48:59.85109121Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:48:59.851390298Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:04.858455957Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:04.858757321Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:09.867964432Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:09.868191472Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:14.875076087Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:14.875249268Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:19.88452841Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:19.884776931Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:24.892191012Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:24.892361844Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:29.903064266Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:29.903448361Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:34.910509823Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:34.911792948Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:39.919371892Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:39.919534623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:44.94349855Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:44.943977825Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:49.955421965Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:49.95580845Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:54.963396106Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:54.963752077Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:49:59.976371993Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:49:59.976771425Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:04.984764507Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:04.984961139Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:09.996041177Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:09.996403068Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:15.008040695Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:15.008226126Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:20.015334003Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:20.015580345Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:25.026049786Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:25.026292661Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:30.033184235Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:30.033395252Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:35.040166566Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:35.040417686Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:40.046617246Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:40.046842246Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:45.053271728Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:45.053450913Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:50.062600716Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:50.062904886Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:50:55.071095071Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:50:55.071281434Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:00.079629388Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:00.079999226Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:05.08683199Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:05.087001166Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:10.099310835Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:10.099826239Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:15.121698413Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:15.122185831Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:20.128996128Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:20.129417303Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:25.136492958Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:25.136696172Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:30.144956899Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:30.145130733Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:35.154928905Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:35.155165568Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:40.164647085Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:40.1648694Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:45.176496051Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:45.17669384Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:50.188524718Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:50.188852787Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:51:55.205101154Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:51:55.205624249Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:00.214426013Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:00.214624061Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:05.226407599Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:05.226870806Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:10.239489073Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:10.239887672Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:15.302670316Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:15.302936368Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:20.313416202Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:20.313707003Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:25.329782717Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:25.330235257Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:30.339194998Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:30.339496895Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:35.350178222Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:35.350480321Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:40.35825845Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:40.358484174Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:45.373797011Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:45.373998441Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:50.38635671Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:50.386562457Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:52:55.396706135Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:52:55.396986986Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:00.404281224Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:00.404470095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:05.412662432Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:05.413061483Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:10.423226416Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:10.423386154Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:15.432975833Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:15.433166969Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:20.443197287Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:20.443383104Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:25.454563374Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:25.454969686Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:30.46275792Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:30.462927989Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:35.47182801Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:35.472042056Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:40.479606568Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:40.479837182Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:45.488251279Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:45.48846806Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:50.49684585Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:50.497061196Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:53:55.506939283Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:53:55.507160469Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:00.514778692Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:00.514950752Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:05.522602194Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:05.522788745Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:10.531264634Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:10.531453815Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:15.539366168Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:15.539565535Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:20.548405972Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:20.548627638Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:25.55804209Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:25.558249436Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:30.576295149Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:30.576604409Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:35.585091025Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:35.585273155Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T18:54:40.592160238Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T18:54:40.5923204Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
[ the instance.go:307 / manager.go:268 error pair above repeats unchanged at the 5s backoff interval from ts=2022-08-08T18:54:45Z through ts=2022-08-08T19:02:17Z ]
ts=2022-08-08T19:02:22.523532087Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:22.523931297Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:27.547638711Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:27.548290063Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:32.618936325Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:32.619346589Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:37.637975186Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:37.641844195Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:42.659100475Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:42.659490628Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:47.671466118Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:47.671754144Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:52.684572529Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:52.685932476Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:02:57.703054697Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:02:57.703296792Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:02.720077025Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:02.720402327Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:07.73746502Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:07.737875078Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:12.755431878Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:12.755852853Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:17.770018891Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:17.770253384Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:22.787788611Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:22.788110225Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:27.825066293Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:27.825501706Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:32.836375993Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:32.836547607Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:37.853437126Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:37.853791983Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:42.872164849Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:42.87252257Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:47.88338141Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:47.883640435Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:52.900373074Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:52.900743473Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:03:57.917953708Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:03:57.91829461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:02.931329476Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:02.931589874Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:07.948234115Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:07.948805047Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:12.962213306Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:12.962361026Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:17.98666744Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:17.990689198Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:23.003238771Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:23.003568396Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:28.019922092Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:28.020169013Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:33.031569773Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:33.031809726Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:38.04920804Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:38.049670881Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:43.062953468Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:43.063146113Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:48.074465704Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:48.074688114Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:53.089182062Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:53.089409321Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:04:58.133123567Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:04:58.13359986Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:03.154033837Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:03.154524534Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:08.173404879Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:08.173688378Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:13.197848525Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:13.198160932Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:18.215233851Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:18.215574837Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:23.227950873Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:23.228225613Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:28.244010339Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:28.244270173Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:33.252422578Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:33.252588452Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:38.274673179Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:38.275039633Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:43.28796929Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:43.288168839Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:48.299983597Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:48.300212556Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:53.314547058Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:53.314829692Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:05:58.327275937Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:05:58.327478281Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:03.348503435Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:03.348841818Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:08.37603858Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:08.376446446Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:13.396153163Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:13.396944098Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:18.410058302Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:18.410395337Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:23.422493295Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:23.422794291Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:28.442776019Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:28.443271578Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:33.477372769Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:33.477760516Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:38.495083772Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:38.495372018Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:43.516988922Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:43.517450894Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:48.531815207Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:48.532078292Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:53.560205883Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:53.560662019Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:06:58.578754153Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:06:58.579203307Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:03.663683466Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:03.664026368Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:08.689088883Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:08.689444589Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:13.700822908Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:13.701043466Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:18.713765749Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:18.714023306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:23.72848651Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:23.728833048Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:28.74421204Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:28.749166049Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:33.766075073Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:33.76636385Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:38.776465322Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:38.776717506Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:43.78819331Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:43.788590373Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:48.797301542Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:48.797500109Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:53.808244709Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:53.808441867Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:07:58.824322014Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:07:58.824671035Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:03.840868326Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:03.841157696Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:08.851964605Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:08.852139796Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:13.866342517Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:13.866869586Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:18.884477522Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:18.884961593Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:23.917026996Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:23.918833007Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:28.933487878Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:28.93377319Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:08:33.950613178Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:08:33.950979502Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:00.457943743Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:00.45826224Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:05.485265915Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:05.485593536Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:10.549030652Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:10.549426515Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:15.560723266Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:15.560945808Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:20.581753064Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:20.582360337Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:25.594803476Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:25.59507845Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:30.614397429Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:30.614696943Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:35.631761989Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:35.632060832Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:40.654247276Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:40.654680241Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:45.667986667Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:45.668229677Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:50.684245433Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:50.68445858Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:16:55.707012499Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:16:55.707449911Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:00.732247021Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:00.732573095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:05.791103082Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:05.791598879Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:10.808552457Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:10.808896404Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:15.830814341Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:15.831101215Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:20.851735542Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:20.852078736Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:25.874517022Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:25.874902842Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:30.890364027Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:30.890580994Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:35.916726767Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:35.917093155Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:40.931573252Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:40.931867024Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:45.950554928Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:45.950892897Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:50.963768368Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:51.095182802Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:17:56.10937364Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:17:56.109597875Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:01.138498618Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:01.138683115Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:06.149811532Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:06.150010369Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:11.16687082Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:11.167073432Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:16.181075508Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:16.181235137Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:21.194486992Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:21.19464279Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:26.211501169Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:26.211825656Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:31.221988204Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:31.222197933Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:36.236682243Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:36.236924295Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:41.25545901Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:41.255783385Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:46.267477802Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:46.267718175Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:51.286428944Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:51.343535739Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:18:56.357396646Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:18:56.357775462Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:01.386306444Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:01.386618564Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:06.397459342Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:06.397660013Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:11.40954046Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:11.409753004Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:16.421941642Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:16.422144839Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:21.434029811Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:21.434211095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:26.450399027Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:26.450609335Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:31.463933458Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:31.464136332Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:36.477254612Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:36.477525289Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:41.495341717Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:41.495594639Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:46.511114334Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:46.511362865Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:51.525907663Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:51.526111772Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:19:56.536539826Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:19:56.536838137Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:01.565387267Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:01.566080513Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:06.579086262Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:06.579286877Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:11.591844524Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:11.592043301Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:16.603062767Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:16.603220602Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:21.616791715Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:21.617128508Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:26.628160646Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:26.628393994Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:31.646622172Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:31.647034538Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:36.660326741Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:36.660547215Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:41.675287848Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:41.675576004Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:46.690314264Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:46.690604661Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:51.702862778Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:51.703082242Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:20:56.717800311Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:20:56.71805537Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:01.737112009Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:01.737567849Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:06.747918849Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:06.748119038Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:11.760431904Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:11.76063688Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:16.782858383Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:16.783160015Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:21.803163921Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:21.803525873Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:26.813815573Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:26.814076507Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:31.827301042Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:31.827493573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:36.838701887Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:36.838949159Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:41.853350658Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:41.853534178Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:46.907166543Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:46.907447133Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:51.925891462Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:51.926285365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:21:56.939531543Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:21:56.939746986Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:01.966881857Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:01.967501718Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:06.988834836Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:06.989195391Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:12.007536424Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:12.00788309Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:17.021865408Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:17.022126936Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:22.034376045Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:22.034563444Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:27.050942322Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:27.051260095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:32.064138215Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:32.064529948Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:37.077953862Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:37.078183319Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:42.090740757Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:42.090909435Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:47.105089972Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:47.105327623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:52.118877654Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:52.119206193Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:22:57.139431694Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:22:57.139814184Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:02.161684379Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:02.161953329Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:07.176207468Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:07.176419666Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:12.190238425Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:12.190468824Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:17.205658299Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:17.206028269Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:22.221744446Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:22.221973164Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:27.238053727Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:27.238382987Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:32.253128865Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:32.253382958Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:37.266686616Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:37.266909163Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:42.280211851Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:42.280401442Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:47.296397956Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:47.296594134Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:52.310505242Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:52.310853009Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:23:57.323960773Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:23:57.324180475Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:02.357580453Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:02.357936438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:07.373382621Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:07.37362826Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:12.388619976Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:12.388907933Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:17.413777447Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:17.414462189Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:22.433368913Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:22.433568088Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:27.471202726Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:27.4715587Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:32.487751901Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:32.487999836Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:37.502333619Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:37.502540266Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:42.521627462Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:42.521839099Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:47.542098633Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:47.542379403Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:52.566351555Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:52.566936588Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:24:57.589432157Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:24:57.589812352Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:02.604251774Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:02.604556376Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:07.621882525Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:07.622255489Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:12.643763945Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:12.644114519Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:17.659983559Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:17.660175424Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:22.674831431Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:22.675165202Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:27.690209018Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:27.690406348Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:32.704567987Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:32.704767078Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:37.737382699Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:37.737779448Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:42.759986103Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:42.760497273Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:47.78429148Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:47.784689254Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:52.822681902Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:52.823207695Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:25:57.845287767Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:25:57.845594322Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:02.894099247Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:02.894578899Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:07.918006722Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:07.918415891Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:12.937972857Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:12.938384657Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:17.957510798Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:17.957835484Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:22.984036638Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:22.984486488Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:28.014805069Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:28.015400658Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:33.043669283Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:33.044057688Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:38.063082468Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:38.063295773Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:43.079552996Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:43.079937704Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:48.106360521Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:48.106860225Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:53.132704613Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:53.133221048Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:26:58.196596006Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:26:58.197139376Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:03.256629325Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:03.257001203Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:08.289580266Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:08.289885916Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:13.319609046Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:13.320426996Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:18.34708128Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:18.347432262Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:23.392614209Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:23.393170498Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:28.415671519Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:28.46409342Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:33.485200713Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:33.485507793Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:38.518642872Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:38.518960403Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:43.546594178Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:43.546922656Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:48.574618323Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:48.575006824Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:53.597835407Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:53.598146431Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:27:58.617738634Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:27:58.618057666Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:03.640895113Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:03.641207611Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:08.662507038Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:08.662862117Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:13.675665622Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:13.675913903Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:18.706672508Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:18.707081385Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:23.720743833Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:23.721137475Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:28.737632043Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:28.737853563Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:33.757488853Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:33.75785748Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:38.771871096Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:38.772205303Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:43.783425812Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:43.783631751Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:48.801100082Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:48.801593523Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:53.817059724Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:53.817258103Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:28:58.828898109Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:28:58.829084896Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
[ ... the instance.go:307 msg="failed to initialize instance" / manager.go:268 msg="instance stopped abnormally, restarting after backoff period" pair above repeats unchanged on the backoff=5s cycle for instance=4348e35bd74bc15bb464b7c1cb114b95, roughly every 5 seconds from ts=2022-08-08T19:29:03Z through ts=2022-08-08T19:36:35Z, always with err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" ... ]
ts=2022-08-08T19:36:40.88135194Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:36:40.881534141Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:36:45.894435835Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:36:45.894595783Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:36:50.911986898Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:36:50.912178701Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:36:55.933609014Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:36:55.934093537Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:00.955668063Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:00.956045612Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:05.96909695Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:05.96933904Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:10.983495314Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:10.983734651Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:15.997425011Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:15.997626961Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:21.014206327Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:21.014814992Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:26.031879115Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:26.032241938Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:31.047458821Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:31.047754384Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:36.071061754Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:36.071498543Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:41.093586479Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:41.093847882Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:46.118215173Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:46.118751089Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:51.145614421Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:51.145964758Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:37:56.15867409Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:37:56.158984797Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:01.175692431Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:01.179320505Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:06.202231548Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:06.202593628Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:11.223129094Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:11.223370914Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:16.243953728Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:16.24441276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:21.259527415Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:21.259854503Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:26.287281022Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:26.287676726Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:31.305304889Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:31.305522298Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:36.323488096Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:36.323676302Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:41.34531796Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:41.345560193Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:46.365657883Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:46.365998247Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:51.38627526Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:51.386496412Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:38:56.403299661Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:38:56.403509334Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:01.435499553Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:01.43610739Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:06.461545716Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:06.462091612Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:11.480105532Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:11.480408535Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:16.494948832Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:16.495262642Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:21.514931466Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:21.515250402Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:26.531925312Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:26.532094175Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:31.548275811Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:31.548522722Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:36.567874143Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:36.635251649Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:41.650795507Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:41.651016341Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:46.66929812Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:46.669517112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:51.695167763Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:51.695563196Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:39:56.730584989Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:39:56.731000946Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:01.85838022Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:01.858805314Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:06.887944689Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:06.889118825Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:11.918067094Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:11.918510731Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:16.933737122Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:16.933971509Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:21.954651675Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:21.954902112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:26.979123498Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:26.979453162Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:32.02730429Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:32.027679713Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:37.051513971Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:37.052088219Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:42.076979046Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:42.077299957Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:47.100558729Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:47.100934126Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:52.126631795Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:52.127268009Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:40:57.153250078Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:40:57.153524128Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:02.197766101Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:02.198098922Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:07.276170427Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:07.276561139Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:12.292516826Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:12.292785221Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:17.310318768Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:17.310489795Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:22.325999731Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:22.326232451Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:27.345801381Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:27.346064738Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:32.369421363Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:32.370036635Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:37.386301872Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:37.386500717Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:42.411118341Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:42.411347063Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:47.435851334Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:47.436287282Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:52.454618505Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:52.454916807Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:41:57.482681136Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:41:57.483353789Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:02.520526994Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:02.5209879Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:07.537797966Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:07.538009985Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:12.565072088Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:12.565392549Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:17.592117254Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:17.592436128Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:22.610324265Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:22.610604528Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:27.635219346Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:27.635584851Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:32.659010505Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:32.659433276Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:37.675855593Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:37.73748108Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:42.761958676Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:42.791156874Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:47.81520555Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:47.815474843Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:52.833294573Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:52.83346535Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:42:57.854146602Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:42:57.854510785Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:02.882050875Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:02.882453659Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:07.90791182Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:07.908234623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:12.95364514Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:12.954025556Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:17.968664311Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:17.968873613Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:22.995961066Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:22.99637722Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:28.015432903Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:28.015655763Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:33.035958347Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:33.036245669Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:38.053646044Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:38.05390362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:43.089269734Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:43.089573092Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:48.10546243Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:48.105673777Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:53.123355214Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:53.123570865Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:43:58.139999044Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:43:58.140358491Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:03.157853216Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:03.15804573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:08.175946461Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:08.176192424Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:13.193938739Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:13.194159133Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:18.212395179Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:18.21265817Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:23.235980418Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:23.236383961Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:28.257249804Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:28.257513125Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:33.283107732Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:33.283426236Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:38.305640059Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:38.306195265Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:43.32445531Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:43.324625414Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:48.340210938Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:48.340438268Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:53.357405968Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:53.357635161Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:44:58.378194349Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:44:58.378616199Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:03.409040478Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:03.409376824Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:08.427310088Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:08.427552994Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:13.448023486Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:13.448390757Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:18.466575868Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:18.466964537Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:23.489958901Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:23.490179897Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:28.513728241Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:28.513988948Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:33.537527914Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:33.537781772Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:38.552296371Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:38.552526448Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:43.579766331Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:43.580083791Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:48.594768524Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:48.594964285Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:53.613324196Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:53.613530641Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:45:58.631869424Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:45:58.632106304Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:03.653029381Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:03.653218351Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:08.667383098Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:08.667610613Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:13.682018803Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:13.682197017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:18.69703127Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:18.697248618Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:23.711350889Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:23.711514065Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:28.725464908Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:28.725728794Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:33.740162535Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:33.740342112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:38.754208418Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:38.754404492Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:43.771417864Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:43.771837498Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:48.786776401Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:48.786995006Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:53.803354592Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:53.803706231Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:46:58.822147342Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:46:58.822390306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:03.843281933Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:03.843472627Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:08.857802485Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:08.858043828Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:13.877781836Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:13.878142966Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:18.89151771Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:18.891739593Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:23.913187684Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:23.913424763Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:28.929099951Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:28.92935969Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:33.943178146Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:33.943394371Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:38.967423903Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:38.967972422Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:43.996071491Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:43.996576877Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:49.015772765Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:49.016011558Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:54.037370684Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:54.037753067Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:47:59.064622279Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:47:59.065113168Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:04.084326427Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:04.084554704Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:09.104601722Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:09.104900155Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:14.124398209Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:14.124598967Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:19.144162987Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:19.144391491Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:24.162727808Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:24.162991732Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:29.181581437Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:29.181936269Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:34.200529126Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:34.20081338Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:39.230096311Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:39.230650773Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:44.246804128Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:44.247083447Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:49.263412241Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:49.263615473Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:54.279990993Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:54.280209852Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:48:59.299124042Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:48:59.299356945Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:04.31635576Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:04.316660679Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:09.332659716Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:09.332906236Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:14.349251703Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:14.349538365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:19.366085094Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:19.366306363Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:24.382491669Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:24.383138438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:29.405432574Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:29.405616099Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:34.420531212Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:34.420818165Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:39.437678422Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:39.438103322Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:44.458149035Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:44.458783409Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:49.478268445Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:49.47861852Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:54.495322843Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:54.495562923Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:49:59.516084695Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:49:59.516335688Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:04.531337993Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:04.531550508Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:09.545651365Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:09.545829876Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:14.56586798Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:14.566163154Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:19.581786562Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:19.581981351Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:24.597811094Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:24.667421761Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:29.682522595Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:29.682834284Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:34.698827703Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:34.699075858Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:39.717433415Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:39.717738715Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:44.732433982Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:44.732679957Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:49.749235999Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:49.74945408Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:54.776959831Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:54.777353486Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:50:59.807116404Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:50:59.807439269Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:04.834770033Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:04.835112102Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:09.854260673Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:09.854454013Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:14.879571659Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:14.879931467Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:19.918782612Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:19.919102303Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:24.935040766Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:24.935227084Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:29.950796118Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:29.951132907Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:34.96522275Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:34.965448792Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:39.979923489Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:39.980179059Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:45.00423264Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:45.004799365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:50.028822956Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:50.029201221Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:51:55.043505049Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:51:55.056155525Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:00.080991976Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:00.08142648Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:05.11132063Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:05.111780015Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:10.139412194Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:10.139928016Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:15.166460119Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:15.166931482Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:20.19031312Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:20.190578217Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:25.224034186Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:25.224378268Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:30.249876997Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:30.250237087Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:35.277812651Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:35.278274897Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:40.300397727Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:40.300667167Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:45.329011554Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:45.32932343Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:50.345964816Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:50.34627461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:52:55.366364754Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:52:55.366721017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:00.391923748Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:00.39226807Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:05.421366738Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:05.42155721Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:10.440369343Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:10.44055788Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:15.462636272Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:15.462981144Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:20.480928851Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:20.481131207Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:25.496147515Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:25.49638338Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:30.512258611Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:30.512540738Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:35.529183756Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:35.529427785Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:40.545201568Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:40.545382537Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:45.562442854Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:45.562811899Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:50.579698423Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:50.579891743Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:53:55.595991013Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:53:55.5961628Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:00.612442139Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:00.612652575Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:05.634402394Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:05.634672192Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:10.650000045Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:10.650170646Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:15.666256935Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:15.666477966Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:20.686521881Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:20.686753709Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:25.704510952Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:25.704776191Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:30.723388213Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:30.7236867Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:35.739555338Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:35.739802695Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:40.757552682Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:40.758019527Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:45.777864884Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:45.778157422Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:50.813892794Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:50.814088809Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:54:55.830178096Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:54:55.830368275Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:00.850010877Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:00.850299613Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:05.867798866Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:05.86806595Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:10.884251325Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:10.884472939Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:15.901940678Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:15.902182989Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:20.918548259Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:20.918861107Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:25.940879578Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:25.941142938Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:30.961081674Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:30.961329869Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:35.982335323Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:35.982591557Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:41.007030586Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:41.035417904Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:46.063400824Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:46.063865959Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:51.082128012Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:51.082325719Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:55:56.101229072Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:55:56.101477362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:01.119772304Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:01.120051807Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:06.157424981Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:06.157950066Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:11.180034819Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:11.180276032Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:16.197574878Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:16.19780763Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:21.215122919Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:21.215332533Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:26.233004458Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:26.233232906Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:31.255376951Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:31.255653484Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:36.284151416Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:36.284597916Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:41.309082048Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:41.309322854Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:46.32753349Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:46.32782579Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:51.34933232Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:51.349646029Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:56:56.375600432Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:56:56.375898452Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:01.404648289Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:01.405103227Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:06.421449459Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:06.447701724Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:11.465042131Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:11.465251813Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:16.483939892Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:16.484143037Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:21.502506587Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:21.502740615Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:26.520341277Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:26.520584371Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:31.542862486Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:31.543143684Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:36.563030921Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:36.563288085Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:41.596061133Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:41.596426534Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:46.619236751Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:46.619559058Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:51.640528351Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:51.640730313Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:57:56.658309111Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:57:56.658571312Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:01.698304538Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:01.698595971Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:06.724188165Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:06.72461542Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:11.748318275Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:11.748840901Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:16.771813943Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:16.772125776Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:21.790781426Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:21.791092928Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:26.81008059Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:26.810374437Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:31.847389663Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:31.8476842Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:36.878986401Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:36.879323714Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:41.900795902Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:41.900988029Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:46.935769952Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:46.936153669Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:51.958581379Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:51.958816824Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:58:57.003572265Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:58:57.003927355Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:02.064160918Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:02.064672747Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:07.102749889Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:07.103154662Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:12.124802204Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:12.125041924Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:17.14686671Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:17.147093378Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:22.170517611Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:22.17080806Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:27.190134543Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:27.190393684Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:32.249242645Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:32.249621406Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:37.269905919Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:37.270133562Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:42.295084115Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:42.295457696Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:47.325272322Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:47.325604705Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:52.353573674Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:52.353884048Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T19:59:57.376193779Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T19:59:57.376654179Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:02.506944307Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:02.507271855Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:07.543273076Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:07.543625428Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:12.578144573Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:12.578513463Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:17.608444138Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:17.60880111Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:22.628957664Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:22.629350613Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:27.673793151Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:27.674122415Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:32.703636757Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:32.70401207Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:37.740635252Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:37.740996671Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:42.784698835Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:42.78516001Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:47.814333732Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:47.814796967Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:52.835683779Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:52.835934007Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:00:57.867357686Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:00:57.86782538Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:02.903233694Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:02.903602327Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:07.92779586Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:07.928109618Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:12.962226884Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:12.962649757Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:17.997314844Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:17.99770121Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:23.0405692Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:23.041208323Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:28.075137703Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:28.075556573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:33.203880693Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:33.204367908Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:38.227952523Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:38.228301512Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:43.2969102Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:43.297246975Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:48.337591712Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:48.337967706Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:53.363827942Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:53.364087168Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:01:58.414664462Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:01:58.415130166Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:03.570138322Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:03.57137551Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:08.611244029Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:08.611568795Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:13.645926741Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:13.646220511Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:18.673849196Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:18.915417957Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:23.946147484Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:23.946772725Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:28.982260525Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:28.982677988Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:33.99721325Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:33.997422007Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:39.014294531Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:39.014562397Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:44.037808635Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:44.038150458Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:49.059627263Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:49.059876727Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:54.079350174Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:54.07967123Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:02:59.093892515Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:02:59.094160728Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:04.109273984Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:04.109515377Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:09.12448922Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:09.124723006Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:14.139217912Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:14.139625861Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:19.166941136Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:19.167422676Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:24.182328144Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:24.182531924Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:29.197507774Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:29.197799365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:34.213009872Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:34.213222946Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:39.229407894Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:39.229625306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:44.245531906Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:44.245737441Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:49.278684743Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:49.279364231Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:54.295087049Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:54.295280825Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:03:59.313581612Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:03:59.313966086Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:04.334239225Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:04.334821699Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:09.349161827Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:09.349368461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:14.364895894Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:14.36513584Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:19.382335471Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:19.382596574Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:24.405605308Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:24.40587346Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:29.420079754Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:29.420297874Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:34.436047196Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:34.436212892Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:39.45386993Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:39.454257098Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:44.468924364Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:44.469231322Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:49.483320233Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:49.483561171Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:54.51569945Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:54.516028336Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:04:59.538386977Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:04:59.538695622Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:04.566361711Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:04.566706016Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:09.58254362Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:09.582875007Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:14.608077639Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:14.608348132Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:19.62422222Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:19.624489302Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:24.640235676Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:24.640450648Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:29.655808423Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:29.656030037Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:34.682419079Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:34.68279415Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:39.702557856Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:39.702871964Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:44.721894556Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:44.722125584Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:49.742851198Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:49.743112668Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:54.775676989Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:54.77639315Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:05:59.791099055Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:05:59.7913113Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:04.815820817Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:04.816292221Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:09.839635293Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:09.840008027Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:14.853775314Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:14.854028135Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:19.867989779Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:19.868215748Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:24.881793571Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:24.88199319Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:29.899320192Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:29.899543108Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:34.91337334Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:34.91356189Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:39.927616724Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:39.928033607Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:44.947847377Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:44.948159065Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:49.965983013Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:49.966213331Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:06:54.996644861Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:06:54.997067157Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:00.012733712Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:00.012965382Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:05.028008041Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:05.028215056Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:10.046873005Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:10.047284985Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:15.064034791Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:15.064273211Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:20.078746986Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:20.078920697Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:25.102610167Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:25.103117488Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:30.120705456Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:30.121276835Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:35.160539823Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:35.160928748Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:40.176266079Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:40.176527712Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:45.212951469Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:45.21336231Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:50.254294404Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:50.256636813Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:07:55.274862137Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:07:55.275102643Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:00.294261614Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:00.294500458Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:05.324115533Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:05.324393362Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:10.343779678Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:10.344010704Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:15.363973701Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:15.364215442Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:20.382997783Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:20.383223623Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:25.407867854Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:25.408271142Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:30.426435256Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:30.426656492Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:35.453617382Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:35.454083929Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:40.472016133Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:40.472293662Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:45.490592356Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:45.490850424Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:50.510694371Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:50.510944177Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:08:55.528517602Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:08:55.528721035Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:00.5469377Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:00.547143996Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:05.570977625Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:05.571397569Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:10.601052587Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:10.601304062Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:15.6200176Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:15.620252485Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:20.643968037Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:20.644197332Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:25.664578951Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:25.6647997Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:30.694367458Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:30.694755782Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:35.71299669Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:35.713260047Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:40.729322186Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:40.72952664Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:45.744802153Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:45.745086653Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:50.768786952Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:50.769004478Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:09:55.793739385Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:09:55.794071126Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:00.820098563Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:00.820550305Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:05.847335995Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:05.847856849Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:10.871100379Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:10.87147166Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:15.890352889Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:15.890641417Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:20.915943506Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:20.916311951Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:25.932491198Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:25.932728926Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:30.957344248Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:30.957691365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:35.98105427Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:35.981347466Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:41.000807972Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:41.001070287Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:46.02833286Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:46.028689941Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:51.056785918Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:51.057115798Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:10:56.083654382Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:10:56.084083211Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:01.102409892Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:01.102727034Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:06.125768713Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:06.126015018Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:11.146365014Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:11.146605846Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:16.165026176Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:16.165240646Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:21.18204182Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:21.18227467Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:26.201155361Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:26.201337988Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:31.220833567Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:31.221070094Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:36.239576243Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:36.239885646Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:41.262416253Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:41.262932208Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:46.280890784Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:46.281148774Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:51.306234596Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:51.306540065Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:11:56.322726706Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:11:56.323123305Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:01.381671491Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:01.382171901Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:06.401134374Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:06.401367804Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:11.423807328Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:11.424180999Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:16.44138073Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:16.441658695Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:21.460332757Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:21.46058755Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:26.48022421Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:26.480503202Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:31.498504257Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:31.498770943Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:36.516698381Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:36.517032095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:41.553265379Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:41.553896003Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:46.587094564Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:46.587498323Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:51.605495619Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:51.605771308Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:12:56.647248702Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:12:56.647571178Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:01.682648474Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:01.683099999Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:06.702611044Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:06.702906794Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:11.730179658Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:11.730455162Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:16.760399398Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:16.760825978Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:21.789210378Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:21.789597866Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:26.820502966Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:26.820911358Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:31.844300597Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:31.844638766Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:36.86326201Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:36.868840994Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:41.898282639Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:41.898609193Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:46.922532218Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:46.922798794Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:51.962360415Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:51.962768247Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:13:56.99565144Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:13:56.996037471Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:02.063932639Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:02.064415898Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:07.099326937Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:07.100343572Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:12.118034194Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:12.118289881Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:17.136761095Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:17.137098316Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:22.163577258Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:22.163935782Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:27.196156934Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:27.196560872Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:32.218047336Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:32.218226027Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:37.236682013Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:37.236903147Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:42.259467381Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:42.259674861Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:47.288154857Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:47.290811005Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:52.309320068Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:52.309526924Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:14:57.334084076Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:14:57.334432103Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:02.390666236Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:02.391184278Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:07.428548265Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:07.428960628Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:12.46312081Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:12.463651089Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:17.493537561Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:17.493936469Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:22.541884075Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:22.552618435Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:27.583500817Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:27.583893521Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:32.601544466Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:32.647816405Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:37.681634532Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:37.682238975Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:42.717983597Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:42.718368245Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:47.740184631Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:47.740554687Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:52.767751287Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:52.76797492Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:15:57.802580855Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:15:57.802946878Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:02.829177545Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:02.829441889Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:07.85317234Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:07.853546461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:12.889273197Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:12.88965876Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:17.925061248Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:17.925441397Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:22.950355532Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:22.950619095Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:27.973207817Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:27.97364242Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:32.995395097Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:32.995629603Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:38.026475649Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:38.026782871Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:43.053462291Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:43.053964828Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:48.076575869Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:48.076797887Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:53.099875008Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:53.100068818Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:16:58.120515463Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:16:58.120806597Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:03.146576745Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:03.146953353Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:08.16948471Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:08.169769181Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:13.19113382Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:13.191336783Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:18.220729376Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:18.22134636Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:23.252536525Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:23.252971661Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:28.275374291Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:28.275700771Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:33.296070227Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:33.296255265Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:38.316279778Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:38.316570381Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:43.35159437Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:43.351872408Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:48.373306334Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:48.373576428Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:53.402002961Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:53.402220166Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:17:58.42493882Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:17:58.425176028Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:03.45293543Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:03.507637772Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:08.53547337Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:08.535802626Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:13.551323978Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:13.551562558Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:18.571763414Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:18.57200574Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:23.597927712Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:23.598407946Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:28.615447638Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:28.615738842Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:33.63317288Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:33.633458725Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:38.651815621Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:38.652023199Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:43.668546753Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:43.66882781Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:48.685473857Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:48.685723995Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:53.705282544Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:53.70557857Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:18:58.728117516Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:18:58.728556534Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:03.746861495Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:03.747129192Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:08.7738088Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:08.774403978Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:13.792333885Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:13.792575365Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:18.810131308Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:19.143569903Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:24.173786763Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:24.174099732Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:29.200313007Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:29.200649542Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:34.220325589Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:34.220596883Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:39.25175367Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:39.252043668Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:44.269781961Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:44.270024461Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:49.290262811Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:49.290496512Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:54.306117068Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:54.306324381Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:19:59.328426553Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:19:59.3289449Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:04.348961123Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:04.445892971Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:09.471251141Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:09.471531653Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:14.498423157Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:14.498634282Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:19.520328196Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:19.520527112Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:24.541766269Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:24.542169394Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:29.564314726Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:29.564586641Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:34.585192675Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:34.585398563Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:39.6053725Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:39.605674573Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:44.621748701Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:44.621948014Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:49.639330655Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:49.639565838Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:54.658834484Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:54.659071533Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:20:59.739171121Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:20:59.739384192Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:04.762823935Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:04.763286409Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:09.782808717Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:09.783063587Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:14.802627081Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:14.802909117Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:19.821665245Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:19.821857577Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:24.839173008Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:24.839394288Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:29.860943951Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:29.861346474Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:34.891574456Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:34.892043312Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:39.919482489Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:39.91977975Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:44.94619944Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:44.946616489Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:49.970670165Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:49.971050005Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:21:55.007555076Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:21:55.008062717Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:00.038497051Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:00.038873654Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:05.091001176Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:05.091447435Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:10.110244196Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:10.110533017Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:15.128470485Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:15.128915614Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:20.155106333Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:20.15532438Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:25.178547421Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:25.178897389Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:30.200277837Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:30.200515043Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:35.2209279Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:35.221168475Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:40.24439987Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:40.244641374Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:45.26347665Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:45.263812632Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:50.284469962Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:50.284765488Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:22:55.308834781Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:22:55.335185844Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:00.35291821Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:00.353134629Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:05.371234209Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:05.371444306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:10.393037658Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:10.393281991Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:15.412377564Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:15.412640223Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:20.436468896Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:20.436698166Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:25.4558044Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:25.456046165Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:30.487361865Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:30.487783306Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:35.505988913Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:35.506203075Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:40.532740275Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:40.533021782Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:45.556831008Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:45.557347693Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:50.588943761Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:50.589331875Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:23:55.61698351Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:23:55.617220989Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:00.635540014Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:00.635765456Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:05.655052974Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:05.655304873Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:10.687760782Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:10.688173343Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:15.741384116Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:15.770929515Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:20.789025099Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:20.789260093Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:25.818228872Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:25.818598775Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:30.842409594Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:30.842608908Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:35.865470422Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:35.865728562Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:40.883806576Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:40.884055665Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:45.904834125Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:45.90506711Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:50.929978309Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:50.930162831Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:24:55.948859645Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:24:55.949059925Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:00.975094062Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:00.975419066Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:05.995933687Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:05.996202406Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:11.016867418Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:11.017191557Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:16.035993831Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:16.036265975Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:21.054531886Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:21.054761114Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:26.078420117Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:26.0787052Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:31.098879389Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:31.099204815Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:36.134794433Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:36.13540752Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:41.155273788Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:41.155500659Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:46.183761658Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:46.184394788Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:51.203835452Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:51.20409158Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:25:56.243065257Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:25:56.243449838Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
ts=2022-08-08T20:26:01.277705016Z caller=instance.go:307 level=error agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 msg="failed to initialize instance" err="error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory"
ts=2022-08-08T20:26:01.278215977Z caller=manager.go:268 level=error agent=prometheus msg="instance stopped abnormally, restarting after backoff period" err="failed to initialize instance: error creating WAL: open WAL segment 0: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal/00000000: no such file or directory" backoff=5s instance=4348e35bd74bc15bb464b7c1cb114b95
[ OK ]
ts=2022-08-08T20:26:02Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-10T21:21:04Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-10T21:21:12Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-10T22:03:51Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-10T23:47:26Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-10T23:47:35Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-12T10:53:09.080699902Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:53:09.144843616Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T10:54:14.05160159Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:54:14.094365513Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T10:55:19.099529112Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:55:19.142364619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T10:56:24.140387674Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:56:24.183166887Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T10:57:29.18537423Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:57:29.568553812Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T10:58:53.881909386Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T10:58:53.998278851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T11:02:10.06595169Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T11:02:10.115908888Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T11:03:39.274915257Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (250000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T11:05:15.755245921Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T11:05:15.758674424Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=1999
ts=2022-08-12T11:05:57.120474561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T10:56:07.857Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"1\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"irq\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:05:57.22128193Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T10:56:07.857Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"1\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:06:30.586743194Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T10:57:07.857Z, series={__name__=\"node_nf_conntrack_stat_insert_failed\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:10:41.905982056Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T10:59:01.587Z, series={__name__=\"namedprocess_namegroup_minor_page_faults_total\", groupname=\"exim\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:11:11.212394737Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:00:07.857Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"7\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:12:08.674929266Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:01:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"SetClientIDConfirm\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:15:03.327811059Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:07:01.587Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", groupname=\"savd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"savd\"}"
ts=2022-08-12T11:15:40.870627554Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:08:07.857Z, series={__name__=\"node_filesystem_free_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vde\", fstype=\"ext4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:15:58.085065542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:08:07.857Z, series={__name__=\"node_filesystem_free_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"tmpfs\", fstype=\"tmpfs\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:16:09.683065984Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:11:01.587Z, series={__name__=\"namedprocess_namegroup_thread_minor_page_faults_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"client-soap-thr\"}"
ts=2022-08-12T11:16:45.204517057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:12:07.857Z, series={__name__=\"node_netstat_Icmp6_InMsgs\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:16:49.58700455Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:11:01.587Z, series={__name__=\"namedprocess_namegroup_threads_wchan\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", wchan=\"ep_poll\"}"
ts=2022-08-12T11:18:05.522379626Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:12:07.857Z, series={__name__=\"node_netstat_Tcp_OutRsts\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:18:41.118408383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:14:01.587Z, series={__name__=\"go_gc_duration_seconds\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", quantile=\"1\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:20:31.019013532Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:16:07.857Z, series={__name__=\"node_network_transmit_bytes_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:20:47.104587859Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:16:07.857Z, series={__name__=\"node_network_transmit_errs_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:21:02.326188516Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:18:01.587Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"pure-ftpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"proportionalSwapped\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:21:31.179277603Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:19:07.857Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"5\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"steal\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:21:43.06485869Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:18:01.587Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"pure-ftpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"virtual\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:22:19.405267204Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=880 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:20:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Rename\", proto=\"2\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:28:27.622180772Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:25:55.188Z, series={__name__=\"scrape_samples_post_metric_relabeling\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:31:41.370575468Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:28:55.188Z, series={__name__=\"scrape_samples_post_metric_relabeling\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:32:15.377767818Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:30:01.587Z, series={__name__=\"scrape_series_added\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:33:41.082288168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:26:01.587Z, series={__name__=\"promhttp_metric_handler_requests_total\", code=\"503\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:34:29.878582777Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:31:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"MkNod\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:36:07.359959943Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:31:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Null\", proto=\"2\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:36:47.219244226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:33:01.587Z, series={__name__=\"namedprocess_namegroup_states\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", state=\"Running\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:37:51.690833114Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:34:07.857Z, series={__name__=\"node_disk_io_time_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vde\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:39:12.228782884Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:37:01.587Z, series={__name__=\"namedprocess_namegroup_thread_count\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"TaniumCX\"}"
ts=2022-08-12T11:39:20.617970588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:34:07.857Z, series={__name__=\"node_disk_io_time_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vdb\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:51:36.556074195Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:38:07.857Z, series={__name__=\"node_filesystem_avail_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"none\", fstype=\"tmpfs\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/var/spool/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T11:51:51.727402763Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=992 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T11:42:07.857Z, series={__name__=\"node_intr_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:47:08.871197408Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:43:07.857Z, series={__name__=\"node_timex_pps_stability_hertz\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:47:19.179549133Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:45:01.587Z, series={__name__=\"namedprocess_namegroup_worst_fd_ratio\", groupname=\"mysqld\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:47:19.251400304Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:46:07.857Z, series={__name__=\"node_netstat_Udp_RcvbufErrors\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:53:45.676624324Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:49:07.857Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"12\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"steal\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:53:49.711702447Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:50:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"CreateSession\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T12:54:14.524485157Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-12T11:50:07.857Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Commit\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
[ OK ]
ts=2022-08-12T13:52:27Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-12T13:53:27.214694351Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T13:53:27.214866666Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=12994
ts=2022-08-12T13:54:27.216058775Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-12T13:54:27.216306464Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="context canceled"
ts=2022-08-12T13:57:17.560302273Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T13:57:17.560481846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=2999
ts=2022-08-12T14:04:02.010053901Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=743 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:02:17.578Z, series={__name__=\"cpanel_users_active\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:59117\", job=\"metrics/cpanel\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:12:39.866397284Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:02:17.578Z, series={__name__=\"cpanel_users_suspended\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:59117\", job=\"metrics/cpanel\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:13:11.811942113Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:04:01.58Z, series={__name__=\"process_max_fds\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:13:21.517386068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=998 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:08:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"15\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:13:58.244203107Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:09:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"GetDeviceList\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:14:04.076807275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:05:07.858Z, series={__name__=\"node_network_carrier_up_changes_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:15:03.617705522Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:07:01.58Z, series={__name__=\"namedprocess_namegroup_major_page_faults_total\", groupname=\"pure-ftpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:16:33.83016635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:09:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"GetLeaseTime\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:18:53.175505067Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:15:01.593Z, series={__name__=\"namedprocess_namegroup_thread_context_switches_total\", ctxswitchtype=\"voluntary\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"topology-connec\"}"
ts=2022-08-12T14:20:30.98864379Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:11:01.58Z, series={__name__=\"namedprocess_namegroup_open_filedesc\", groupname=\"exim\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:22:39.484961322Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:12:07.858Z, series={__name__=\"node_disk_io_time_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:23:01.11778752Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:13:07.858Z, series={__name__=\"node_schedstat_timeslices_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"9\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:27:44.188573267Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:16:07.858Z, series={__name__=\"node_filesystem_avail_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vdc\", fstype=\"ext4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:32:27.380125868Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:23:01.58Z, series={__name__=\"namedprocess_scrape_errors\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:35:28.123320987Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:19:01.585Z, series={__name__=\"namedprocess_namegroup_thread_io_bytes_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", iomode=\"write\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"cx-channel-writ\"}"
ts=2022-08-12T14:35:58.431195826Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:20:07.858Z, series={__name__=\"node_load1\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:41:52.82003469Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:26:01.587Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:42:39.076647543Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:27:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"13\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"nice\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:45:50.532056546Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:23:01.58Z, series={__name__=\"process_resident_memory_bytes\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:46:25.66951332Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:28:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Link\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:46:48.290164084Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:24:07.858Z, series={__name__=\"node_network_carrier\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:47:33.777300095Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:26:01.587Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", groupname=\"savd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:49:42.016069883Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:27:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"13\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:50:06.814011353Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:32:07.858Z, series={__name__=\"node_schedstat_timeslices_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"10\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:51:02.320151031Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:34:01.58Z, series={__name__=\"namedprocess_namegroup_thread_context_switches_total\", ctxswitchtype=\"voluntary\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"cx-channel-writ\"}"
ts=2022-08-12T14:51:23.016734548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:35:07.858Z, series={__name__=\"node_entropy_available_bits\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:53:03.824351496Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:31:07.858Z, series={__name__=\"node_disk_discards_merged_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:54:35.120190143Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:34:01.58Z, series={__name__=\"namedprocess_namegroup_thread_context_switches_total\", ctxswitchtype=\"voluntary\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"server-challeng\"}"
ts=2022-08-12T14:55:47.709028831Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:36:07.858Z, series={__name__=\"node_sockstat_RAW_inuse\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T14:56:56.88457416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:38:01.58Z, series={__name__=\"namedprocess_namegroup_thread_io_bytes_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", iomode=\"write\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"TaniumDetectEng\"}"
ts=2022-08-12T15:02:52.118685317Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:38:01.58Z, series={__name__=\"namedprocess_namegroup_thread_io_bytes_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", iomode=\"write\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"bridged-ipc-1\"}"
ts=2022-08-12T15:02:52.120046069Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:39:07.858Z, series={__name__=\"node_filesystem_size_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"50.6.132.48:/backup3\", fstype=\"nfs4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/backup3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:05:06.112314684Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:40:07.858Z, series={__name__=\"node_timex_tick_seconds\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:05:30.209279678Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:42:01.625Z, series={__name__=\"namedprocess_namegroup_worst_fd_ratio\", groupname=\"lsphp\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:09:13.28857019Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:42:01.625Z, series={__name__=\"namedprocess_namegroup_worst_fd_ratio\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:10:23.185037737Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:45:01.58Z, series={__name__=\"namedprocess_namegroup_context_switches_total\", ctxswitchtype=\"nonvoluntary\", groupname=\"mysqld\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:11:17.913230138Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:46:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"nice\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:12:48.828134554Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:50:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"7\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"nice\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:12:53.467244768Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:53:01.58Z, series={__name__=\"namedprocess_namegroup_states\", groupname=\"sshd_client\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", state=\"Waiting\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:13:46.886821806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:54:07.858Z, series={__name__=\"node_disk_writes_merged_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:14:07.622133583Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:51:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"TestStateID\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:14:39.173313156Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:55:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"rapl\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:16:54.784771013Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:57:01.58Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"topology-send\"}"
ts=2022-08-12T15:18:34.735096808Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:58:07.858Z, series={__name__=\"node_filesystem_files_free\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vdc\", fstype=\"ext4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:19:40.509320059Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:59:07.858Z, series={__name__=\"node_softnet_processed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"3\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:19:54.321012881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:55:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"uname\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:20:25.918092458Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:01:01.584Z, series={__name__=\"namedprocess_namegroup_thread_major_page_faults_total\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"httpd\"}"
ts=2022-08-12T15:23:19.56899206Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T14:58:07.858Z, series={__name__=\"node_filesystem_files_free\", agent_hostname=\"cloud14.hostgator.com\", device=\"50.6.132.48:/backup4\", fstype=\"nfs4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/backup4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:24:54.286945763Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:02:07.858Z, series={__name__=\"node_memory_PageTables_bytes\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:27:42.72600845Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:01:01.584Z, series={__name__=\"namedprocess_namegroup_thread_major_page_faults_total\", groupname=\"savd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"savscand\"}"
ts=2022-08-12T15:29:12.427933005Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:06:07.858Z, series={__name__=\"node_network_receive_compressed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:30:08.124250171Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:08:01.58Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"virtual\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:30:13.691869675Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:03:59.664Z, series={__name__=\"scrape_series_added\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9117\", job=\"metrics/apache\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:31:23.816588223Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:06:07.858Z, series={__name__=\"node_network_receive_drop_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:31:50.449946151Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:08:01.58Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"exim\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"proportionalResident\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:32:28.189338591Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:09:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"3\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"steal\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:33:23.490936298Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:10:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Read\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:35:09.386147068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:09:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"3\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"idle\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:37:07.778980115Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:12:01.583Z, series={__name__=\"namedprocess_namegroup_states\", groupname=\"exim\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", state=\"Waiting\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:38:14.215279976Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:14:07.858Z, series={__name__=\"node_schedstat_waiting_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:42:51Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-12T15:43:51.704608462Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:43:51.704740539Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=2999
ts=2022-08-12T15:46:51.870065201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:46:51.871734737Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=435 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:47:29.430105311Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=742 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:45:55.188Z, series={__name__=\"scrape_duration_seconds\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:47:49.298926629Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=741 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:45:55.188Z, series={__name__=\"up\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:49:38.397761324Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="segments: open /tmp/grafana-agent-wal/4348e35bd74bc15bb464b7c1cb114b95/wal: no such file or directory"
ts=2022-08-12T15:49:38Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-12T15:50:38.453531309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:50:38.453559781Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=3 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:50:38.453650952Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=969
ts=2022-08-12T15:51:38.454925333Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush metadata"
ts=2022-08-12T15:51:38.455184019Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="context canceled"
ts=2022-08-12T15:51:38.455474969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="context canceled"
ts=2022-08-12T15:53:38.497676395Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:53:38.499339679Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="context canceled"
ts=2022-08-12T15:57:14.212255943Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=964 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:52:07.858Z, series={__name__=\"node_network_carrier_changes_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:58:09.133340971Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:53:55.188Z, series={__name__=\"exim_reject_total\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T15:58:28.577008116Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:55:07.858Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"6\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T16:00:01.216661981Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:54:55.188Z, series={__name__=\"exim_queue\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:00:33.682559201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T15:56:07.858Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"11\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T16:03:28.497925175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T16:03:28.49854188Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=484 exemplarCount=0 err="context canceled"
ts=2022-08-12T16:04:38.499160686Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T16:04:38.499810898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=482 exemplarCount=0 err="context canceled"
ts=2022-08-12T16:08:47.696914085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:05:37.517Z, series={__name__=\"bind_up\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9119\", job=\"metrics/bind\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:09:49.678870502Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:07:07.858Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"12\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T16:10:40.120929006Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=947 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:08:07.858Z, series={__name__=\"node_network_receive_compressed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:12:40.388790273Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=740 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:06:59.664Z, series={__name__=\"scrape_series_added\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9117\", job=\"metrics/apache\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:16:18.275571592Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=792 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:10:55.188Z, series={__name__=\"scrape_series_added\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:16:23.29197193Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:09:07.858Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"9\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T16:16:37.134259031Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:12:01.597Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"virtual\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:16:59.018882432Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:10:07.858Z, series={__name__=\"node_network_transmit_compressed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:17:14.985486427Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=987 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:14:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"OpenDowngrade\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:18:18.875817966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:14:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Link\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:19:40.006866659Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:17:07.858Z, series={__name__=\"node_disk_io_now\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:20:56.59780527Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:17:07.858Z, series={__name__=\"node_disk_io_now\", agent_hostname=\"cloud14.hostgator.com\", device=\"vdb\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:22:15.235460103Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:18:07.858Z, series={__name__=\"node_schedstat_waiting_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"2\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:25:14.373260341Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:20:55.188Z, series={__name__=\"up\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:26:13.726807745Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:23:07.858Z, series={__name__=\"node_network_receive_frame_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:32:05.349077Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:26:07.858Z, series={__name__=\"node_network_protocol_type\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:32:59.779827769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:29:07.86Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"15\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:33:29.588781345Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:29:07.86Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"2\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"nice\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:33:33.286260104Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:30:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Read\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:37:47.819979778Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:33:07.858Z, series={__name__=\"node_disk_io_time_weighted_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:38:28.221146267Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=985 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:34:07.858Z, series={__name__=\"node_schedstat_waiting_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"7\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:41:48.088740571Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:30:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"MkDir\", proto=\"3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:42:03.833145551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:32:01.582Z, series={__name__=\"namedprocess_namegroup_open_filedesc\", groupname=\"sshd_client\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:43:01.577040131Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:33:07.858Z, series={__name__=\"node_disk_discards_completed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vdf\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:44:37.118209643Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=995 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:38:07.858Z, series={__name__=\"node_scrape_collector_success\", agent_hostname=\"cloud14.hostgator.com\", collector=\"diskstats\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:46:35.608010335Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:36:01.582Z, series={__name__=\"namedprocess_namegroup_thread_context_switches_total\", ctxswitchtype=\"voluntary\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"TaniumDetectEng\"}"
ts=2022-08-12T16:46:35.7121984Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:40:01.582Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"PassengerAgent\"}"
ts=2022-08-12T16:47:50.809684825Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=998 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:41:07.858Z, series={__name__=\"node_filesystem_free_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"50.6.132.48:/backup3\", fstype=\"nfs4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/backup3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:53:54.264850964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:37:07.858Z, series={__name__=\"node_dmi_processor\", agent_hostname=\"cloud14.hostgator.com\", corecount=\"1\", family=\"Other\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", manufacturer=\"Red Hat\", maxspeed=\"2000 MHz\", socketdesignation=\"CPU 0\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:55:00.79787009Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:38:07.858Z, series={__name__=\"node_scrape_collector_success\", agent_hostname=\"cloud14.hostgator.com\", collector=\"dmi\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:57:49.382339942Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=998 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:42:07.858Z, series={__name__=\"node_softnet_times_squeezed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"5\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T16:59:22.269498408Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:44:01.582Z, series={__name__=\"namedprocess_namegroup_thread_minor_page_faults_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"topology-close\"}"
ts=2022-08-12T17:00:24.814233961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:42:07.858Z, series={__name__=\"node_softnet_times_squeezed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"8\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:00:31.411181065Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:45:07.858Z, series={__name__=\"node_netstat_TcpExt_ListenDrops\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:02:13.091671481Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:45:07.858Z, series={__name__=\"node_netstat_TcpExt_ListenOverflows\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:03:39.515318646Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:47:01.582Z, series={__name__=\"go_memstats_heap_idle_bytes\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:03:41.31461445Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:48:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"10\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"softirq\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:04:10.196698527Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:49:07.858Z, series={__name__=\"node_nfs_packets_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", protocol=\"udp\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:05:59.227243012Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:51:01.582Z, series={__name__=\"namedprocess_namegroup_num_threads\", groupname=\"lsphp\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:06:22.139486162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:48:07.858Z, series={__name__=\"node_cooling_device_max_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"7\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T17:13:22.941974741Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:49:07.858Z, series={__name__=\"node_network_transmit_compressed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:15:04.706997741Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=998 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:55:01.582Z, series={__name__=\"namedprocess_namegroup_states\", groupname=\"lsphp\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", state=\"Sleeping\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:15:11.931566158Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:53:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"ReadDir\", proto=\"2\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:17:34.022350673Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:57:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"dmi\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:17:55.613413621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T16:59:01.582Z, series={__name__=\"namedprocess_namegroup_thread_count\", groupname=\"mysqld\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"mysqld\"}"
ts=2022-08-12T17:18:16.525638492Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:00:07.858Z, series={__name__=\"node_filesystem_files\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vdb\", fstype=\"ext4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home1\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:19:27.271019454Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=995 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:01:07.858Z, series={__name__=\"node_sockstat_UDP_mem\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:19:50.377783908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:01:07.858Z, series={__name__=\"node_softnet_dropped_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"2\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:20:48.717587662Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:03:01.582Z, series={__name__=\"namedprocess_namegroup_thread_major_page_faults_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"LogSerializingT\"}"
ts=2022-08-12T17:21:48.74149615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:05:17.579Z, series={__name__=\"scrape_samples_scraped\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:59117\", job=\"metrics/cpanel\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:23:11.971027473Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:10:01.582Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", groupname=\"pure-ftpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:24:05.622133445Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:05:25.855Z, series={__name__=\"mysql_up\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:3306\", job=\"integrations/mysqld_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:26:51.48024307Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:07:07.858Z, series={__name__=\"node_boot_time_seconds\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:27:58.080988517Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:08:07.858Z, series={__name__=\"node_network_net_dev_group\", agent_hostname=\"cloud14.hostgator.com\", device=\"lo\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:28:57.015162299Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:14:01.582Z, series={__name__=\"namedprocess_namegroup_states\", groupname=\"httpd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", state=\"Other\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:30:43.371029895Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:10:01.582Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"proportionalResident\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:31:56.2036699Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:12:07.862Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Read\", proto=\"2\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:32:28.576369492Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:16:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"entropy\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:33:22.707487852Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:16:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"loadavg\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:34:10.518046533Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:19:07.858Z, series={__name__=\"node_filesystem_device_error\", agent_hostname=\"cloud14.hostgator.com\", device=\"tmpfs\", fstype=\"tmpfs\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:34:19.578267057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:19:07.858Z, series={__name__=\"node_filesystem_files\", agent_hostname=\"cloud14.hostgator.com\", device=\"50.6.132.48:/backup3\", fstype=\"nfs4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/backup3\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:35:49.12556694Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:20:07.858Z, series={__name__=\"node_softnet_processed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"11\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:38:43.828573303Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:22:01.585Z, series={__name__=\"namedprocess_namegroup_thread_major_page_faults_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"topology-send\"}"
ts=2022-08-12T17:39:08.809929483Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:22:01.585Z, series={__name__=\"namedprocess_namegroup_thread_io_bytes_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", iomode=\"write\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"cx-channel-read\"}"
ts=2022-08-12T17:39:40.33132945Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:24:07.858Z, series={__name__=\"node_timex_tick_seconds\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:40:54.49021442Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:27:07.858Z, series={__name__=\"node_network_address_info\", address=\"108.167.165.115\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", netmask=\"32\", scope=\"global\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:41:23.476176299Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:24:55.188Z, series={__name__=\"scrape_series_added\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9636\", job=\"metrics/exim\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:43:02.931137423Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:26:07.858Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"8\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-12T17:43:39.767660975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:27:07.858Z, series={__name__=\"node_network_transmit_drop_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:46:24.413135985Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:30:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"6\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"idle\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:46:41.437801291Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:31:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Setattr\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:47:06.464298589Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:31:07.858Z, series={__name__=\"node_nfs_requests_total\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", method=\"Sequence\", proto=\"4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:48:09.845293713Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:34:07.858Z, series={__name__=\"node_disk_reads_completed_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vdf\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:49:08.527231637Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:37:01.582Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"client-soap-thr\"}"
ts=2022-08-12T17:49:24.986814736Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:35:07.858Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"netclass\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:50:09.82362862Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:38:07.858Z, series={__name__=\"node_filesystem_free_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vde\", fstype=\"ext4\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:51:58.133562489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:41:01.582Z, series={__name__=\"namedprocess_namegroup_thread_io_bytes_total\", groupname=\"savd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", iomode=\"write\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"savscand\"}"
ts=2022-08-12T17:52:47.314569962Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:42:07.858Z, series={__name__=\"node_netstat_TcpExt_SyncookiesFailed\", agent_hostname=\"cloud14.hostgator.com\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:53:36.622388646Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:49:07.858Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"8\", host=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"idle\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:53:40.48388466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T17:48:01.582Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", groupname=\"lsphp\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T17:54:08.332707159Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T17:54:57.873450035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T17:55:06.386301107Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=511 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 511 samples and 0 metadata"
ts=2022-08-12T17:56:40.479780709Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T17:56:44.128765873Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T18:02:14.986297526Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T18:02:30.995632677Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (142857.14285714287) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T18:02:43.067987261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=921 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (166666.66666666666) exceeded while adding 921 samples and 0 metadata"
[ OK ]
ts=2022-08-12T19:56:06Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-12T19:56:14Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-12T20:02:15.003800713Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:03:13.522908671Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:03:15.071270585Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:04:33.093145875Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:04:33.13620978Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:10:40.805542223Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:10:41.024566689Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:15:04.955270797Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:15:04.955436668Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=1999
ts=2022-08-12T20:16:08.46672809Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:04:54.714Z, series={__name__=\"node_nfs_connections_total\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:17:34.551298322Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:04:54.714Z, series={__name__=\"node_nf_conntrack_stat_early_drop\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:17:42.241174021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:06:01.585Z, series={__name__=\"namedprocess_namegroup_num_threads\", groupname=\"lsphp\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:17:53.424815947Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:07:54.714Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"8\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"irq\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:17:58.285937802Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:19:02.51818481Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=991 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 991 samples and 0 metadata"
ts=2022-08-12T20:19:08.615954358Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:19:11.515265786Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:20:15.071994489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:20:28.164976085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:21:15.175930708Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:22:33.076808988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:22:33.1192464Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:23:12.409281217Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:23:46.439673268Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (142857.14285714287) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:24:30.601991754Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:24:46.340944306Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=488 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 488 samples and 0 metadata"
ts=2022-08-12T20:24:53.611868498Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:24:56.563234994Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-12T20:26:04.957909763Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=8 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:26:04.95795362Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=5 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:26:04.9580253Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=6061
ts=2022-08-12T20:26:38.288916113Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:26:38.3375743Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=925 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 925 samples and 0 metadata"
ts=2022-08-12T20:27:02.926938449Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:27:03.160497221Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:28:03.417994633Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:35:51.377316975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:35:56.554363845Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=989 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 989 samples and 0 metadata"
ts=2022-08-12T20:38:00.186166271Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:38:00.22476891Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:39:15.095204988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:39:19.217089378Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:42:18.391048396Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:43:16.808944932Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:44:02.332575908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:44:20.003094152Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:39:54.714Z, series={__name__=\"node_sockstat_FRAG6_memory\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:44:35.360889248Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=965 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T20:42:54.714Z, series={__name__=\"node_filesystem_size_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"tmpfs\", fstype=\"tmpfs\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/ramdisk\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T20:44:50.402517604Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=313 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 313 samples and 0 metadata"
ts=2022-08-12T20:44:53.233345736Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=315 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 315 samples and 0 metadata"
ts=2022-08-12T20:44:56.595979546Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-12T20:45:03.312559042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=256 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 256 samples and 0 metadata"
ts=2022-08-12T20:45:14.995122085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:45:15.36331484Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:46:04.955182706Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=5 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:46:04.955609756Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=252 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:46:05.613793766Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:46:11.78054565Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=476 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 476 samples and 0 metadata"
ts=2022-08-12T20:47:15.1646542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:47:15.374194298Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:48:43.110590651Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:48:43.47536847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:54:45.668057619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:58:34.955283235Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-12T20:58:34.957086983Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="Failed to flush all samples on shutdown" count=999
ts=2022-08-12T20:58:53.403852015Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T20:58:56.486589858Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T20:58:56.872099216Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-12T20:59:01.222913966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=830 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 830 samples and 0 metadata"
ts=2022-08-12T21:00:03.597518197Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=251 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 251 samples and 0 metadata"
ts=2022-08-12T21:00:04.957791655Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=95 exemplarCount=0 err="context canceled"
ts=2022-08-12T21:00:10.707564042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=737 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 737 samples and 0 metadata"
ts=2022-08-12T21:01:18.860779963Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=740 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 740 samples and 0 metadata"
ts=2022-08-12T21:06:52.704984099Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-12T21:07:42.369810409Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T21:00:29.128Z, series={__name__=\"mysql_global_variables_max_connections\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:3306\", job=\"integrations/mysqld_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T21:08:20.551002887Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=743 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (142857.14285714287) exceeded while adding 743 samples and 0 metadata"
ts=2022-08-12T21:08:22.734430852Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (142857.14285714287) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T21:08:48.030622049Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-12T21:08:53.754979546Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of order sample. timestamp=2022-08-12T21:03:01.581Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", groupname=\"exim\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"metrics/process_exporter\", memtype=\"swapped\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-12T21:09:42.769634918Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 639 samples and 0 metadata"
[ OK ]
ts=2022-08-15T18:43:00Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-15T19:14:05Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-15T19:14:05.888873248Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="error tailing WAL" err="readCheckpoint: readCheckpoint wasn't able to read all data from the checkpoint /opt/nf-observability/prometheus/wal/4348e35bd74bc15bb464b7c1cb114b95/wal/checkpoint.00000067/00000000, size: 65536, totalRead: 19311"
[ OK ]
ts=2022-08-16T15:04:42Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-16T18:43:14Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
[ OK ]
ts=2022-08-17T14:45:41Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-17T16:16:18.760549956Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (83333.33333333333) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T16:16:20.254232789Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (83333.33333333333) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:17:21.922919283Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="context canceled"
ts=2022-08-17T16:17:21.924924107Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="context canceled"
ts=2022-08-17T16:17:27.775973852Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:49:35.603Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"savscand\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:27.850270991Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:49:35.603Z, series={__name__=\"namedprocess_namegroup_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"sshd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:28.043874151Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:50:35.603Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"TaniumClient\"}"
ts=2022-08-17T16:17:28.197992542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:51:54.714Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"9\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"iowait\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:28.419996897Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:52:54.714Z, series={__name__=\"node_network_receive_fifo_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"eth0\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:29.427848521Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:50:35.603Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", mode=\"system\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"staged-watcher\"}"
ts=2022-08-17T16:17:30.596673033Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:51:54.714Z, series={__name__=\"node_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"8\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mode=\"irq\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:30.969702574Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (90909.09090909091) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:17:32.47015384Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:56:54.714Z, series={__name__=\"node_cooling_device_cur_state\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", name=\"5\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", type=\"Processor\"}"
ts=2022-08-17T16:17:33.547173603Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:53:54.714Z, series={__name__=\"node_softnet_times_squeezed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"5\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:33.939705908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:55:35.603Z, series={__name__=\"namedprocess_namegroup_states\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"TaniumCX\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", state=\"Sleeping\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:17:34.758298616Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:56:35.603Z, series={__name__=\"namedprocess_scrape_procread_errors\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:19:08.969335871Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:57:54.714Z, series={__name__=\"node_filesystem_files_free\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/vde\", fstype=\"ext4\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/home4\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:19:09.948581716Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:58:54.714Z, series={__name__=\"node_schedstat_timeslices_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"15\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:19:11.544639814Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:00:35.603Z, series={__name__=\"namedprocess_namegroup_memory_bytes\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"TaniumDetectEng\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", memtype=\"proportionalResident\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:19:30.150104732Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:57:54.714Z, series={__name__=\"node_filesystem_size_bytes\", agent_hostname=\"cloud14.hostgator.com\", device=\"/dev/loop0\", fstype=\"ext4\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", mountpoint=\"/var/tmp\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:19:32.451442201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T14:58:54.714Z, series={__name__=\"node_scrape_collector_duration_seconds\", agent_hostname=\"cloud14.hostgator.com\", collector=\"cpufreq\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:21:31.340332744Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:01:35.603Z, series={__name__=\"namedprocess_namegroup_thread_cpu_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"nscd\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", mode=\"user\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\", threadname=\"nscd\"}"
ts=2022-08-17T16:21:32.414227264Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:02:54.714Z, series={__name__=\"node_disk_discard_time_seconds_total\", agent_hostname=\"cloud14.hostgator.com\", device=\"vda\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:23:30.853144392Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:03:54.714Z, series={__name__=\"node_network_info\", address=\"fa:16:3e:f0:53:74\", agent_hostname=\"cloud14.hostgator.com\", broadcast=\"ff:ff:ff:ff:ff:ff\", device=\"eth0\", duplex=\"unknown\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", operstate=\"up\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:24:08.652311005Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=31 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:04:54.714Z, series={__name__=\"node_softnet_times_squeezed_total\", agent_hostname=\"cloud14.hostgator.com\", cpu=\"3\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:24:09.665129363Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:24:30.971771769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:03:54.714Z, series={__name__=\"node_nf_conntrack_stat_insert_failed\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:12345\", job=\"integrations/node_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:24:35.785737411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:04:59.555Z, series={__name__=\"apache_connections\", agent_hostname=\"cloud14.hostgator.com\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9117\", job=\"integrations/apache_http\", state=\"keepalive\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:24:40.64420243Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:06:35.603Z, series={__name__=\"namedprocess_namegroup_states\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"savscand\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", state=\"Zombie\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:25:09.91120276Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:25:20.589112902Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T16:25:33.808202342Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T16:25:37.514130083Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:11:35.603Z, series={__name__=\"namedprocess_namegroup_num_threads\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"TaniumClient\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:26:08.786088095Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=985 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 985 samples and 0 metadata"
ts=2022-08-17T16:26:29.623925121Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:26:43.406353125Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=994 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 994 samples and 0 metadata"
ts=2022-08-17T16:27:13.925989656Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T16:27:16.960829194Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (22727.272727272728) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:27:25.469986476Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=988 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 988 samples and 0 metadata"
ts=2022-08-17T16:27:33.654646672Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 400 Bad Request: user=anonymous: err: out of bounds. timestamp=2022-08-17T15:22:35.603Z, series={__name__=\"namedprocess_namegroup_num_procs\", agent_hostname=\"cloud14.hostgator.com\", groupname=\"datacycle.pl\", hostname=\"cloud14.hostgator.com\", instance=\"cloud14.hostgator.com:9256\", job=\"integrations/process_exporter\", technical_service=\"Bluerock\", technical_service_offering=\"HGCloudSites\"}"
ts=2022-08-17T16:27:38.45435666Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:28:40.461008816Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:28:46.366553645Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T16:29:43.379771105Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:30:19.164102527Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T16:30:45.711587457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:31:44.350254511Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T16:31:47.046837364Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:31:47.839860988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T16:32:30.96823075Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=994 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 994 samples and 0 metadata"
ts=2022-08-17T16:32:47.46682801Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T16:33:30.8685701Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=807 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 807 samples and 0 metadata"
ts=2022-08-17T16:33:30.956433624Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=785 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 785 samples and 0 metadata"
ts=2022-08-17T16:34:57.40158488Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T16:35:37.61940411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-17T16:37:41.979050524Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
[ OK ]
ts=2022-08-17T16:38:49Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-17T16:40:54.928449295Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T16:50:37.527447754Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-17T16:51:37.791965711Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-17T16:55:38.688016834Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-17T16:57:39.255697725Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T16:59:59.772994959Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T17:02:49.359317672Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:02:50.54723089Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:02:55.660644162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:04:50.422806006Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:04:50.46192518Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:05:50.425467577Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:05:50.632960132Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:07:49.357256843Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:07:49.39629621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:08:50.464448411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:09:49.288118559Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:10:49.367529892Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:10:49.481631122Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:11:49.61968594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:11:49.658752728Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:12:50.422463106Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:12:50.633536082Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:14:14.201321142Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T17:14:22.180439506Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T17:14:28.508880242Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=978 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 978 samples and 0 metadata"
ts=2022-08-17T17:14:28.911931155Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=970 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 970 samples and 0 metadata"
ts=2022-08-17T17:16:49.358548537Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:16:49.397511142Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:17:49.616242076Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:17:49.824507886Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:18:50.45523691Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:19:08.645739749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T17:19:09.29059633Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T17:19:14.341291941Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=941 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 941 samples and 0 metadata"
ts=2022-08-17T17:22:54.978096206Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T17:23:55.227627518Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T17:29:56.847511749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:32:57.645501679Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:33:49.292504686Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:35:58.371305621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:36:58.588185828Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:38:38.994446594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=660 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 660 samples and 0 metadata"
ts=2022-08-17T17:41:39.712633713Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=658 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 658 samples and 0 metadata"
ts=2022-08-17T17:44:40.506685102Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T17:45:55.806770852Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:49:56.879610919Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:51:37.609323075Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 639 samples and 0 metadata"
ts=2022-08-17T17:51:50.429010066Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:51:50.538508207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:51:57.782758653Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:53:49.361008031Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:53:49.399896356Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:54:36.936682741Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=28 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 28 samples and 0 metadata"
ts=2022-08-17T17:54:36.979150563Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T17:54:39.292338755Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T17:54:44.372409471Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=466 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 466 samples and 0 metadata"
ts=2022-08-17T17:54:44.412238029Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=462 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 462 samples and 0 metadata"
ts=2022-08-17T17:54:59.294348142Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T17:55:51.414567898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T17:55:51.453625975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T17:56:12.83346397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (20833.333333333332) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T17:59:58.704161441Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:00:58.933615392Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:01:39.197133774Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-17T18:01:59.274126772Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:05:55.255623096Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T18:12:57.195988085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:14:37.687036153Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T18:14:49.289774565Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:14:49.502094908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:14:57.80543732Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:15:49.358393969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:15:49.567552301Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:16:57.36843961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:17:49.459920862Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:18:49.72464784Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:19:49.896116316Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:19:51.063108329Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:21:49.357250683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:21:49.465914589Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:21:59.374579424Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T18:21:59.684688522Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T18:22:59.290943397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:25:54.940404703Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T18:28:40.657580218Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T18:28:49.358154778Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:28:49.729701114Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:28:55.878246599Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:29:50.007589531Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:30:49.456687751Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:30:50.103254796Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:32:51.420448575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:32:51.639174882Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:34:49.907014043Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:34:50.557486083Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:36:49.288119131Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:36:50.455169606Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:37:28.168624788Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T18:37:29.462852104Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T18:37:37.652569683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=371 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 371 samples and 0 metadata"
ts=2022-08-17T18:40:39.677505591Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T18:40:59.72207245Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T18:44:55.700338878Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:45:55.949095011Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:48:49.287628881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:50:57.310199198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:54:38.160890807Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T18:54:49.357706628Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:54:49.396825728Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:54:58.278862147Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T18:55:49.457981973Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:55:49.496927909Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:56:49.57693961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T18:57:49.287792832Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:58:50.415239721Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T18:58:50.525274162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T19:00:34.04504618Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T19:00:39.292046175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=825 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 825 samples and 0 metadata"
ts=2022-08-17T19:07:49.287806746Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T19:08:56.058844372Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:10:56.6413866Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:14:57.673343377Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:17:38.279623157Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-17T19:17:49.898944681Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T19:17:50.109116499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T19:17:58.322622499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:22:59.476977002Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:23:59.685964349Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T19:25:40.115679014Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-17T19:31:56.714984099Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T19:39:38.528588065Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T19:42:39.199902035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T19:45:49.287571052Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T19:46:55.166321459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T19:47:40.369069722Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T19:49:49.289223563Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T19:58:38.121166938Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T19:59:58.380604265Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:00:58.616927417Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:02:39.062325544Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T20:02:49.288131677Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:02:59.106202721Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:13:56.816068951Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:14:49.287186835Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:19:38.354977694Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T20:19:58.398022541Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:21:49.288173473Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:23:49.28750486Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:26:49.288032631Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:26:54.904132312Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T20:27:55.153481881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T20:29:49.329599883Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T20:30:40.874297592Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-17T20:31:56.209505203Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:33:36.669596608Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-17T20:35:37.216558209Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-17T20:35:57.300522533Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:40:49.28742863Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T20:41:58.637368395Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:42:58.861263969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:43:39.024691822Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-17T20:44:39.243826136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T20:45:59.503979514Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:47:54.925303265Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T20:48:55.178372299Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T20:52:36.193427715Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-17T20:52:56.275595511Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T20:55:57.035203545Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T21:06:59.72520341Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T21:08:55.185887216Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T21:09:55.504136431Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T21:10:49.304971845Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:16:57.450976614Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T21:17:49.618440518Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:17:49.657574762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:17:57.667950837Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T21:18:49.723644314Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:19:49.456977455Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:19:50.625893783Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:20:57.29433595Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:21:49.287933293Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:22:49.462750928Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:22:50.118996198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:24:49.458915648Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:24:50.647013846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:25:50.417175801Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:25:52.589076862Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:27:49.363047436Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:27:49.471821215Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:28:49.616834648Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:28:49.725644714Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:30:49.358030964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:30:49.466979756Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:31:49.457182233Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:31:49.496193939Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:33:49.28786059Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:33:51.453345526Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:34:49.288010621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:36:22.45504594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:36:23.622951373Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:37:31.570921812Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-17T21:37:45.970533649Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-17T21:37:51.862337614Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T21:38:02.046391889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=805 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 805 samples and 0 metadata"
ts=2022-08-17T21:38:02.399679247Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=836 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 836 samples and 0 metadata"
ts=2022-08-17T21:46:57.262698035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T21:47:37.503320126Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-17T21:47:49.303646051Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T21:54:39.661538754Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T22:01:37.952106423Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-17T22:01:58.087403682Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:02:38.31695635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-17T22:04:58.995192785Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:11:56.307916262Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:13:49.361003515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T22:13:57.012482729Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:19:38.787982422Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-17T22:21:59.431947238Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:25:40.655298233Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T22:26:56.06939435Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:27:36.355847297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-17T22:28:56.823246789Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:31:57.864112796Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:33:58.450326982Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:35:49.365023832Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T22:36:59.324183796Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:37:49.303317516Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T22:40:49.303901592Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T22:43:56.726268874Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T22:44:36.963911021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 639 samples and 0 metadata"
ts=2022-08-17T22:48:38.269526172Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T22:51:49.367939309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T22:53:39.734983806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T22:53:59.794566833Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T22:54:55.082809254Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T22:55:49.360380312Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T22:56:49.304226927Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T22:57:41.098865995Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T23:00:57.241105274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:04:58.586767275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:05:38.821214531Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=659 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 659 samples and 0 metadata"
ts=2022-08-17T23:07:59.467282375Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:08:49.303293454Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T23:08:59.755601068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-17T23:09:55.042942042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T23:10:49.304439494Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T23:12:56.086150408Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:15:49.359512631Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T23:18:49.303497636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T23:21:39.00287458Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-17T23:21:59.062810197Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:23:49.303596488Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-17T23:23:59.654568313Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:26:55.655884972Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:28:56.45278321Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:34:58.468862842Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:35:49.382843645Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-17T23:36:38.996893101Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-17T23:48:58.100646149Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:51:58.974684821Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-17T23:54:39.786649966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T23:55:40.075505191Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-17T23:55:55.13818393Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-17T23:58:56.235005741Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:02:37.539475548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T00:06:38.977540847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T00:06:49.39387712Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:06:50.659996018Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:06:59.128044922Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:07:53.554370573Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:09:02.451641449Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:09:02.707102267Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:10:49.303021755Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:10:50.562532162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:11:49.973626077Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:12:53.518365968Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:12:53.659351472Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:14:50.510107038Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:14:50.765273998Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:16:49.974553513Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:16:50.229757393Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:18:50.524597998Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:18:50.579631529Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:20:49.677138868Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:20:49.817010499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:22:49.303928226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:22:49.444005988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:24:37.837058275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:24:38.092115309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:25:49.678594753Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:25:49.818211619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:26:49.680084605Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:26:50.406123909Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:28:49.678073773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:28:49.732837792Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:29:49.678949851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:29:49.818906027Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:30:50.509746972Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:30:50.650401227Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:32:49.678489612Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:32:50.108882287Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T00:33:44.955200383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-18T00:33:50.127321794Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:33:52.411366031Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-18T00:34:59.301009501Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:35:39.507284941Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T00:37:55.067125473Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T00:41:56.311057391Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:43:56.909105398Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:45:57.527932309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:47:37.974461271Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T00:47:58.026670791Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:50:38.726555798Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T00:50:58.776862346Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:53:49.295554306Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T00:56:55.340461281Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T00:58:55.933818951Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:00:56.58748126Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:02:57.251403533Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:06:58.356919671Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:08:38.807860684Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T01:08:49.295929621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T01:14:55.475812718Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:15:55.769925144Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:16:56.066212706Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:17:49.301287881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T01:18:56.722910615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:19:57.023686115Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:20:57.328678963Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:22:49.344411582Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T01:23:58.151603309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:24:58.401360189Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:27:49.343337327Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T01:31:49.344191515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T01:36:49.295906204Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T01:36:56.82387899Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:37:57.122645842Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:38:49.29563789Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T01:41:38.135371569Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T01:43:58.6990975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:47:39.651360469Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T01:49:40.214466354Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T01:50:55.562829437Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T01:52:49.295748557Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T02:02:39.541010089Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:03:59.875552822Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T02:04:40.070793515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T02:05:49.343749921Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T02:08:56.368684809Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:11:57.309748884Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:12:37.549185493Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T02:15:38.368141345Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T02:18:59.175704676Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:19:39.370749415Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:19:59.422614294Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:22:49.344648907Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T02:26:56.464978632Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:31:37.86208115Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:31:57.915179946Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:33:49.343383826Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T02:34:58.670014407Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:35:38.864785179Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T02:37:39.37162497Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:37:59.424386847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:38:39.618738614Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:38:59.671553036Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T02:41:55.518441914Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:42:40.761001224Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T02:43:56.178888644Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:45:56.77591147Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:46:36.973463712Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T02:48:49.295693251Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T02:49:49.296043794Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T02:51:38.350822471Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T02:53:38.851248548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T02:54:59.158883189Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T02:56:39.605908777Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T02:57:49.342779334Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T02:58:49.344856314Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T02:58:55.204297775Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T03:06:49.295857986Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:07:37.855616072Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T03:09:38.35644053Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T03:10:49.295176369Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:12:59.178125021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:13:49.343958685Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T03:14:59.677194762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T03:15:49.294771036Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:15:54.92547145Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T03:20:49.343675498Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T03:21:56.799912226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:24:37.597215722Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T03:24:57.647986477Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:27:49.295616411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:28:38.609201871Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T03:34:55.224120686Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T03:35:55.518185996Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:39:36.67070221Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-18T03:41:49.29622878Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:44:49.295190206Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T03:45:58.454475195Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:46:58.705992103Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:49:59.458528805Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:57:56.79978631Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T03:58:57.103793621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:01:49.295301523Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T04:08:49.295680342Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T04:12:56.784165196Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:15:57.651542506Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:16:57.901066671Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:17:58.149826332Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:19:38.600388457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-18T04:19:58.652762954Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:20:38.847860551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T04:20:58.903846986Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:30:56.755928356Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:35:38.114341892Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T04:41:39.636992821Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T04:47:56.480268344Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T04:51:49.295516312Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T04:52:37.895906336Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T04:53:38.158585125Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T04:57:39.191417499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T04:58:49.343781215Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T04:58:59.495363275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:00:59.998647014Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T05:06:56.87047916Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:07:57.174634805Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:11:58.264035074Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:12:49.297324827Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T05:14:59.083774124Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:17:39.787452636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T05:25:49.296247168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T05:25:57.268342953Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:26:57.568477445Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:31:49.343509843Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T05:33:59.414423945Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:41:56.757250962Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T05:47:38.367242071Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T05:49:38.878796966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T05:52:59.693961365Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T05:58:49.295225159Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T05:59:49.296169848Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T06:09:39.347958541Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T06:10:59.653804277Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T06:12:55.204556057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T06:15:56.151552947Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:16:36.347134083Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-18T06:16:49.296199633Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T06:19:57.351292687Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:21:37.845259549Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T06:21:49.295267953Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T06:23:58.410576269Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:31:55.562594939Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:33:49.294776045Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T06:36:57.112001755Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:37:57.405900061Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:40:58.175405194Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:41:58.423459696Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:42:38.622575428Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T06:46:49.343911528Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T06:48:55.228590762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T06:52:36.371156522Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T06:55:57.401536788Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T06:59:38.381254297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-18T07:01:38.897172411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T07:04:39.658912467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T07:07:00.217787875Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T07:11:56.713555297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:12:57.016807977Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:16:38.06113694Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-18T07:20:49.344145884Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T07:22:39.572230169Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T07:22:49.296519813Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T07:22:59.625661759Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:24:55.175424535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T07:25:49.304829105Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T07:28:56.42298727Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:29:56.726803932Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:32:37.535835227Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T07:32:57.651904081Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:33:37.848577065Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T07:33:49.295035385Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T07:36:38.602078579Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T07:37:58.901873656Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:40:39.559260861Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T07:40:59.607645253Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:44:55.657919996Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:47:56.52853019Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:48:56.867361923Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:49:57.158402877Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T07:57:39.026586588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T07:57:59.075582898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:00:39.729463821Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T08:01:39.987240127Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T08:05:56.184030693Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:08:49.336039177Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T08:09:49.292948Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T08:11:37.771389319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T08:12:38.000220186Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T08:16:38.935371621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T08:16:49.337826217Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T08:16:58.982692154Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:21:55.184212069Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T08:27:36.789832279Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T08:29:49.291277071Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T08:31:37.867640452Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T08:32:38.098472199Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T08:34:58.617026135Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:38:39.496667324Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T08:38:59.543856253Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:42:40.5111156Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T08:45:36.297672452Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-18T08:47:56.9567409Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T08:52:38.165179164Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T08:53:38.395207183Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T08:55:49.291352679Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T08:58:49.293508035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T08:59:54.853345726Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T09:09:49.295303292Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T09:13:58.904863255Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:16:59.659021017Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T09:20:55.805089707Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:21:36.028033412Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T09:23:56.67019385Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:24:49.336864462Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T09:25:57.268842165Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:28:37.97113926Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T09:29:38.223874274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T09:29:58.27117585Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:30:38.450957633Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T09:31:38.679801284Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T09:37:55.161750718Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T09:46:49.291810944Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T09:47:37.878458139Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T09:47:49.291863324Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T09:49:49.291411897Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T09:50:38.576234797Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T09:52:39.037809731Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T09:54:59.548749748Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T09:56:55.007806641Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T09:57:55.28704551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T10:00:41.072007385Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T10:00:56.118200365Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:01:56.420117206Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:05:57.527426383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:08:58.226796555Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:14:59.623227834Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:21:49.291564086Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T10:24:57.426517703Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:26:37.849471454Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T10:29:58.601524889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:30:38.780754551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T10:30:58.827992846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:35:54.989102998Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T10:36:55.259834591Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T10:37:55.532001725Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:41:49.292068137Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T10:43:49.336516863Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T10:48:58.540769397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:51:59.236958668Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T10:55:55.215931528Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T11:00:56.701559916Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:01:57.004332903Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:02:57.279912156Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:04:57.792094782Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:09:58.954963412Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:10:59.185425779Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:12:39.593549345Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T11:14:49.335953494Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T11:15:49.336330561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T11:15:55.432242331Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:16:55.705495702Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:17:40.927124161Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T11:18:49.33578572Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T11:19:41.560875988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T11:19:49.296528449Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T11:28:59.103593915Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:29:39.301561699Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T11:30:39.55492566Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T11:41:37.855923738Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T11:41:57.906960913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:42:58.159568996Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T11:44:49.295930485Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T11:48:49.295450983Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T11:50:55.236671683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T11:58:49.2949949Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T11:58:57.647932913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:02:39.502431752Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T12:08:36.197280281Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-18T12:08:56.29470498Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:10:49.294566772Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T12:15:49.295811482Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T12:19:39.22581509Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T12:20:39.483909513Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T12:20:59.53532152Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:24:55.652962242Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:41:55.45106476Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:43:56.049074316Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:47:57.309624639Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T12:51:38.310915977Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T12:53:38.817745214Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T12:54:39.071863838Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T12:55:49.29640838Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T12:59:55.485621185Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:00:55.785077266Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:02:49.343883453Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T13:05:57.348055615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:06:57.648938914Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:08:58.157625611Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:09:58.410871073Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:10:58.665585907Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:13:59.43319317Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:18:55.831080072Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:19:36.072632093Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T13:20:36.371513903Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T13:21:36.690915572Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-18T13:21:56.789557178Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:24:57.640068366Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:26:49.295651683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T13:27:58.403069383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:28:58.6496903Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:29:58.897000198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:30:59.151539452Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:34:40.170259026Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T13:34:55.220145896Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T13:35:40.466536025Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T13:40:57.077882267Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:41:57.382316667Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:45:58.44724237Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:49:39.405768467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T13:53:55.563543775Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T13:54:40.807422793Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T14:02:38.656774995Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T14:03:38.908832957Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-18T14:05:39.418150135Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T14:08:55.284470969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T14:12:56.550034126Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:15:57.492053269Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:21:59.01842834Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:28:40.927088569Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T14:33:57.56066942Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:37:58.593182765Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:39:59.09603956Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:47:36.259769945Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-18T14:47:49.295780532Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T14:50:57.279866851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:53:58.151106311Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T14:56:38.853414359Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T14:56:58.90466953Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:00:54.956942462Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T15:05:56.561595155Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:06:56.860914954Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:12:58.581830874Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:15:39.333961264Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T15:15:59.386048414Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:16:39.587996555Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T15:17:54.902856782Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T15:18:55.203066551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T15:19:49.296446105Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T15:19:55.507915735Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:21:56.16200016Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:22:56.46358163Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:26:37.60089756Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T15:28:38.10674073Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T15:32:59.212632775Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:34:59.716239601Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T15:40:56.492780106Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:45:37.88623915Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T15:45:57.936874266Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:47:38.395263579Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T15:47:58.460907312Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:51:39.454015072Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T15:51:59.506222635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T15:55:40.565611157Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T15:59:36.81557806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T16:00:37.217197417Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T16:00:49.36907171Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T16:01:58.142115926Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:02:38.36387716Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-18T16:06:39.413181794Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T16:06:59.47993551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:10:55.619384681Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:12:49.297360487Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T16:13:56.557598305Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:16:37.444432755Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T16:18:58.061983452Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:23:39.347778592Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T16:24:59.662771618Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T16:26:55.249513958Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T16:27:55.550434971Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:28:55.853341703Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:35:37.870473272Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T16:40:59.212758189Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:41:39.410898599Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T16:41:59.462704208Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:44:49.295511813Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T16:44:55.263619158Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T16:48:49.29659749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T16:48:56.535497475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:53:49.29506479Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T16:58:39.189526094Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T16:58:59.242571007Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T16:59:39.44756816Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T17:00:59.759139788Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T17:01:55.012388598Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T17:02:49.296619551Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:05:36.152140156Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T17:07:56.883840603Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:10:37.709629237Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T17:11:49.296254169Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:12:38.219910902Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T17:14:38.729777501Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-18T17:14:58.781271898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:17:59.633202625Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:19:55.212903025Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T17:24:36.817376273Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T17:24:56.923204632Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:25:57.301693514Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:26:57.643038536Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:32:39.373447464Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T17:34:49.303121529Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:36:55.561992068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:40:49.375431535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:40:49.500463021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T17:42:50.463470375Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:42:50.587846894Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T17:43:50.467651958Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T17:43:50.592434544Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T17:44:06.926315207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-18T17:44:09.298030902Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-18T17:44:15.038654922Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=784 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 784 samples and 0 metadata"
ts=2022-08-18T17:44:15.086177272Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=788 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 788 samples and 0 metadata"
ts=2022-08-18T17:45:39.523908241Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T17:54:57.240262403Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:56:57.798196459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T17:58:38.304058876Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T17:59:58.616384815Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:00:49.344562823Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T18:01:49.295060591Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:02:55.070783935Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T18:05:40.91422124Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=655 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 655 samples and 0 metadata"
ts=2022-08-18T18:05:55.966778144Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:06:56.266707212Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:11:57.754542507Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:13:58.281341459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:18:39.4909174Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T18:18:49.296068213Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:20:55.057433883Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T18:22:49.295512486Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:24:49.345633011Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T18:25:56.641662197Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:27:57.25377583Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:29:37.74579182Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T18:31:38.263877666Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T18:35:59.461565707Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:36:59.712437172Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T18:38:49.344247793Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T18:38:55.26139101Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T18:39:40.504885666Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T18:40:40.813483382Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T18:41:49.296218187Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:43:36.715001168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-18T18:44:49.294676773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:48:58.186375273Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:49:58.434851252Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:51:58.943242955Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:52:59.191266335Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:53:59.444773913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T18:54:49.295783973Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T18:54:59.704519908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T18:59:56.205552001Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:00:56.511374276Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:01:49.308964145Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T19:01:49.557972404Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T19:02:05.109023048Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-18T19:02:10.167262443Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=617 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 617 samples and 0 metadata"
ts=2022-08-18T19:02:55.385050103Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:03:55.758664874Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:05:39.441876348Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T19:11:36.1871752Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T19:11:56.287447258Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:12:36.496820885Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-18T19:16:57.841135449Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:18:38.333267241Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T19:19:58.644888959Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:22:59.425077101Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:23:49.295471647Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T19:24:39.880435588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T19:25:40.180507364Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T19:25:49.296248873Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T19:28:36.084183975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-18T19:30:56.791725284Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:39:39.173321002Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T19:40:39.421558232Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T19:43:55.331158583Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:53:49.294653824Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T19:58:59.629133997Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T19:59:54.919393762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T20:01:55.874019407Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:08:37.917336703Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T20:14:39.445962368Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T20:20:56.381859951Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:23:57.298747899Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:31:49.295521881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:32:59.766589642Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T20:34:55.316454028Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:35:55.611132148Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:37:49.295092455Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:40:57.207592827Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:46:38.786607293Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T20:46:58.837635066Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:48:39.286488528Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T20:49:39.54633724Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T20:49:49.48324391Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:49:49.60857296Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:49:59.678936261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:50:49.657812323Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:50:49.782266766Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:52:49.933947728Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:52:49.980703743Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:53:24.55388333Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=28 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 28 samples and 0 metadata"
ts=2022-08-18T20:53:24.605351457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-18T20:53:29.374852451Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=909 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 909 samples and 0 metadata"
ts=2022-08-18T20:53:40.607106229Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=322 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 322 samples and 0 metadata"
ts=2022-08-18T20:53:43.629244635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=325 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 325 samples and 0 metadata"
ts=2022-08-18T20:53:59.298861238Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T20:55:02.376463247Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:55:02.776008629Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:56:49.295912698Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:56:49.342777278Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:57:53.436694349Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:57:53.483604554Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T20:59:49.296289951Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T20:59:49.343324105Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:00:49.296559885Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:00:50.51120071Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:01:49.481631559Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:02:51.486775057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:02:51.533707045Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:04:49.37297658Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:04:49.4973732Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:05:49.693280376Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:06:49.295300827Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:06:49.696260487Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:07:49.936677177Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:07:52.187859522Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:09:51.501950772Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:09:51.626123302Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:11:49.295209686Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:12:37.889381982Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:12:49.376165222Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:13:50.509466515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:14:49.295615154Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:15:49.480998982Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:15:49.608294726Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:17:49.481752943Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:17:49.528637026Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:18:49.485093851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:18:49.609334299Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T21:19:12.650774143Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-18T21:19:20.949254575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-18T21:19:34.833373015Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=417 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 417 samples and 0 metadata"
ts=2022-08-18T21:19:39.698496181Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=322 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 322 samples and 0 metadata"
ts=2022-08-18T21:19:59.311114215Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:23:40.650312186Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T21:29:37.838537846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T21:30:58.264381239Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:32:38.811400573Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T21:38:49.303892998Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:40:56.715576227Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:43:49.303602035Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T21:44:58.418775477Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:48:39.568108231Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T21:48:59.628675029Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:49:54.925107698Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T21:52:55.97402576Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T21:53:36.258528774Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T21:58:37.959396212Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T22:02:54.851843847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T22:06:41.197664731Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T22:06:56.257471016Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:09:57.388927516Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:12:58.286762509Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:14:58.887986282Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:16:59.47259697Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:18:55.059066691Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T22:21:56.151783181Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:36:36.047157397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-18T22:37:56.51358475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:39:37.144032675Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-18T22:41:37.857688287Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T22:43:49.304198068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T22:43:58.527858754Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:44:58.816527019Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:45:39.045443099Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T22:45:59.106405018Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:46:39.333544816Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-18T22:46:59.395272596Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:47:49.303187971Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T22:47:59.685986004Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T22:49:55.33053995Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-18T22:50:49.303636146Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T22:50:55.675012802Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:52:56.433556632Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:55:57.496953451Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:56:57.792615683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T22:57:49.303251299Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T22:58:38.311855937Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T22:58:58.372462765Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:03:39.838232665Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-18T23:11:37.67801813Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-18T23:14:58.65188017Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:15:38.880800319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-18T23:15:49.370945589Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T23:16:59.235164314Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:17:39.466401383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T23:18:59.840361274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-18T23:23:49.303621515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-18T23:24:49.360996525Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T23:24:56.962106408Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:35:55.441848378Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:37:41.073766001Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T23:37:56.133118744Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:40:57.206822278Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:43:58.147540657Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:44:38.380332383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-18T23:47:49.360312446Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-18T23:53:36.256814459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-18T23:54:56.731195161Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-18T23:57:37.658761198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-18T23:59:58.3119028Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:00:59.218570757Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:01:59.725635918Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T00:03:40.31063714Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T00:05:49.30399226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T00:05:56.121632111Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:06:56.475960192Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:07:56.829000623Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:08:49.300914308Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T00:08:57.157731716Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:10:37.708953038Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T00:14:58.901051225Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:15:39.128948769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T00:16:59.483120261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:18:55.065638647Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T00:20:55.779103982Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:21:56.193247977Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:25:57.661132138Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:26:37.893880965Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T00:27:38.217603615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T00:27:58.278255207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:29:38.801079537Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T00:31:59.457333568Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:36:36.087402575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T00:37:49.303982025Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T00:37:56.561114106Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:38:56.933977136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:46:39.38128513Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T00:46:59.437985921Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:51:40.960600462Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T00:53:36.654858856Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=638 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 638 samples and 0 metadata"
ts=2022-08-19T00:57:58.037791943Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T00:58:38.252207397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T01:00:38.876566603Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T01:00:58.939013442Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:04:55.132648114Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T01:06:49.366519253Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T01:06:55.833744773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:11:57.6899042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:13:58.283981552Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:15:38.817400096Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T01:18:59.761538773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T01:21:40.718488583Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T01:23:36.382499204Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T01:23:56.498691175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:24:49.363324072Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T01:24:56.867902762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:29:38.487807645Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T01:29:58.552334743Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:31:59.157549293Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:33:59.746580207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T01:38:56.4905124Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:40:49.304135918Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T01:40:57.19744988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:41:57.546162816Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:46:59.024136805Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:47:39.250331168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T01:48:59.604967821Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:57:57.661768291Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T01:59:58.274530568Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:00:38.543992123Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T02:00:58.614641533Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:05:55.252076614Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T02:06:55.599354752Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:07:40.884054856Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T02:07:55.943417844Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:09:56.703196749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:15:58.767330297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:18:59.635534811Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:20:55.274541826Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T02:22:40.9236542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T02:25:49.364403344Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T02:25:57.104623739Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:30:49.304287108Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T02:31:58.944088458Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:33:49.303798805Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T02:35:55.12308378Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T02:38:56.229070505Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:41:57.302725028Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:42:57.64775198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:44:58.277361416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:45:38.505745489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T02:45:58.565791121Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:47:59.158897261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:49:49.304661521Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T02:49:49.359896847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T02:50:55.038262067Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T02:54:56.443307249Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T02:56:57.150538512Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:06:55.275368224Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T03:09:36.268585667Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-19T03:13:57.757464317Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:17:49.304397449Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T03:17:58.9343315Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:20:59.828479826Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T03:22:55.467885402Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:29:57.912868281Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:33:49.303580018Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T03:33:59.0873621Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:35:39.588085545Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T03:38:49.300062164Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T03:45:37.944327871Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T03:46:58.299468898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:48:38.822827287Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T03:48:58.881972174Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T03:53:49.361274467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T03:55:41.062556464Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T03:56:56.470680531Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:03:38.711157895Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T04:08:55.257454489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T04:12:56.724441851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:15:49.303929103Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T04:15:57.715937846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:19:49.360339567Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T04:19:58.903593008Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:23:55.07784877Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T04:25:40.715175631Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T04:25:55.773534026Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:28:56.858743157Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:29:57.207634222Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:34:58.745963608Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:39:49.303809534Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T04:39:55.213897469Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T04:43:36.530935858Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T04:43:49.352212084Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T04:46:37.613149582Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T04:47:57.967576679Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:48:49.36005332Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T04:48:58.258225542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:52:39.391767472Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T04:52:59.452033791Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:53:39.679417446Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T04:58:56.410433178Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T04:59:56.735303635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:03:49.303505116Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T05:05:58.685038398Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:09:54.855655628Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T05:13:36.047996817Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T05:13:56.147347187Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:14:56.455968378Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:15:56.755758306Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:19:37.862405429Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T05:21:38.370006871Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T05:24:39.194359222Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T05:24:59.255576964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:25:49.307613769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T05:26:39.781897672Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T05:26:59.842747511Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T05:30:56.241176865Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:32:49.360909869Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T05:33:49.30372892Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T05:34:57.670668619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:35:37.897958252Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T05:36:38.212542624Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T05:46:36.25137097Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T05:49:57.365918913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:51:37.846853979Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T05:52:58.153434573Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:53:58.406863588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T05:54:49.297602428Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T05:57:39.379452466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T05:58:39.630511294Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T06:03:56.348806611Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:06:57.282927491Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:13:49.343917531Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T06:14:59.371374585Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:19:55.790332898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:20:36.036459806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T06:24:57.384846503Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:25:57.634925873Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:28:38.336505528Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T06:31:59.15181286Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:35:55.223891174Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T06:36:49.295516319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T06:37:49.344447226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T06:38:36.06893025Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T06:40:49.295213869Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T06:43:57.643539967Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:44:37.843455735Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T06:44:49.343928844Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T06:45:38.125963225Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T06:47:58.690099377Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:48:38.887654703Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T06:53:55.257871824Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T06:55:40.803178404Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T06:55:49.345118601Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T06:55:55.854633028Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:56:56.199302106Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T06:58:36.702548483Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T07:01:49.297838552Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T07:01:57.722948676Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:02:57.975816648Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:07:39.189256841Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T07:08:39.44208368Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T07:10:54.996306619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T07:11:55.291927966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T07:13:40.837253729Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-19T07:15:56.54302819Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:16:56.847203958Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:18:57.483957146Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:22:49.303683227Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T07:24:39.038296457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T07:28:49.360198026Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T07:29:55.69885293Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:32:56.800848487Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:38:58.682597636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:39:58.974438546Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:42:49.304435969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T07:45:55.848406623Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:48:56.946855104Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:50:57.64165803Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:51:57.933884576Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T07:55:49.304520524Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T07:58:54.997650232Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T08:00:56.258678806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:01:36.501244249Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T08:03:57.339272049Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:04:37.566691699Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T08:05:37.910754785Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T08:06:38.21095274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T08:06:58.270962409Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:07:38.501030717Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T08:09:59.163878969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:17:56.817258056Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:19:57.591297247Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:21:58.189615973Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:23:49.303802762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T08:23:58.773119188Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:24:59.076117762Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:25:59.365864534Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:27:49.304298217Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T08:31:56.403531685Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:34:57.452053486Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:36:37.972814683Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T08:37:38.264252317Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T08:38:38.578956137Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T08:42:59.810581351Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T08:46:36.064255648Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T08:49:57.251057997Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:50:57.598764697Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:51:49.300077303Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T08:53:38.37273052Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T08:53:49.360178004Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T08:56:59.315407634Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T08:57:59.605695463Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:02:56.320557548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:04:56.92031413Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:08:49.346880525Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T09:09:58.345037521Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:12:59.22569653Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:18:36.089342195Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-19T09:18:56.210606453Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:19:36.453003615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-19T09:20:49.304337787Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T09:24:38.248504772Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-19T09:25:49.304021237Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T09:27:39.145840966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T09:33:49.360359162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T09:34:56.542844758Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:35:36.775583318Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T09:35:49.360262711Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T09:35:56.891143558Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:36:57.236628503Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:38:57.832329404Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:42:38.953286499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=655 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 655 samples and 0 metadata"
ts=2022-08-19T09:45:54.877380739Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T09:50:56.676618252Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:53:57.670161942Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T09:54:37.899844759Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T09:58:39.079683374Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T09:58:59.13996805Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:04:36.469452619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T10:04:49.304183535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:06:49.303930979Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:09:38.168939896Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T10:11:38.757446555Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T10:12:59.116503525Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:15:39.93478306Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T10:16:49.304398908Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:16:55.34099131Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:17:55.686322322Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:19:56.437734782Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:20:49.361737074Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T10:21:57.148272437Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:24:49.30759134Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:27:38.918944992Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T10:27:58.978312803Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:29:49.303789853Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:29:59.56190274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:31:40.141700028Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T10:32:40.503520378Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T10:38:57.693127939Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:40:38.212612125Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T10:40:58.272571915Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:44:39.380259698Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T10:44:59.439938859Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:47:55.372021929Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T10:51:36.714615658Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-19T10:53:49.303999395Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T10:58:38.924231862Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T11:01:39.820081669Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=653 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 653 samples and 0 metadata"
ts=2022-08-19T11:02:49.304368874Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T11:03:55.59024466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:06:56.693347364Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:09:57.676099761Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:10:49.305392434Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T11:12:58.567958169Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:15:59.488688003Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:18:40.357807085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T11:20:56.167938319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:21:36.398210052Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=643 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 643 samples and 0 metadata"
ts=2022-08-19T11:21:56.512994443Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:29:59.0554608Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:33:55.299760166Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T11:36:56.402314382Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:37:56.749874281Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:41:37.992840323Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T11:44:38.881754803Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T11:44:49.305493179Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T11:46:59.538016206Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:49:40.417882239Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T11:53:56.968772528Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T11:56:37.932606475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T11:59:49.304150575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T12:09:57.975352201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:10:49.303956261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T12:13:59.153272029Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:18:40.676612732Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T12:20:36.376780757Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T12:22:57.202106502Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:23:37.434029335Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T12:28:38.981100834Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T12:29:59.336857935Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:32:49.304995071Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T12:33:40.564112258Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T12:35:36.268969011Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T12:37:36.971413255Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T12:38:57.435936964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:39:57.726112032Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:41:38.250018309Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-19T12:46:59.804486198Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T12:50:36.09249686Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T12:50:56.208603406Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:51:49.30404043Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T12:52:36.789122416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T12:54:57.62481755Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T12:55:49.361335396Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T12:57:49.360429797Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T13:02:55.028658569Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T13:03:55.373772594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:09:57.592250373Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:13:38.747480117Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T13:14:39.045963744Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T13:15:39.339616819Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T13:21:49.360849627Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T13:21:56.47050355Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:23:57.17787097Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:27:58.433815735Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:28:49.304225798Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T13:29:59.025325962Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:31:59.61145473Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:32:54.903026102Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T13:34:55.623968851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:43:49.317886947Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T13:44:38.941363318Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T13:45:49.307649689Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T13:46:39.535059076Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T13:48:55.247484059Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T13:49:49.304093466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T13:49:49.359538473Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T13:51:56.365585723Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:53:49.36187302Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T13:55:57.743328077Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:56:58.034391466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:57:38.263298919Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T13:58:58.616794848Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T13:59:38.848526101Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T14:00:59.769204593Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T14:06:56.868747587Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:10:38.111173898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T14:10:49.303903151Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T14:18:55.666408326Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:19:40.952830053Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T14:22:49.304107856Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T14:27:58.753512561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:36:56.761884823Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:37:57.108794505Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:38:57.456858777Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:41:58.372173913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:42:58.664041767Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:43:38.896158519Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T14:45:39.482418195Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T14:46:39.779984391Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T14:48:49.304436664Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T14:49:55.853684529Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:50:49.3042421Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T14:51:56.62869199Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:52:56.983471393Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:55:37.937805966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=651 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 651 samples and 0 metadata"
ts=2022-08-19T14:56:58.293945673Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:57:58.586057209Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:58:58.877954504Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T14:59:59.173859937Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:05:56.191012765Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:09:57.632959744Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:12:58.545838548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:15:49.361264084Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T15:15:59.440128709Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:16:59.73318691Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T15:18:49.372522811Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T15:20:41.025946308Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T15:22:56.80356475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:26:58.128282535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:29:59.030774745Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:30:59.320440749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:33:55.288134405Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T15:36:36.279228172Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 639 samples and 0 metadata"
ts=2022-08-19T15:44:38.931093538Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T15:47:54.879880348Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T15:49:55.589654773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:50:49.304784767Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T15:52:36.525538704Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T15:54:57.388364016Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T15:56:37.950844456Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T15:56:49.305667225Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T15:57:58.3106183Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:06:57.628067315Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:12:39.389981615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T16:13:39.702187717Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T16:14:55.108597561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T16:15:40.395362589Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T16:16:55.805140168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:17:36.097504857Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=650 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 650 samples and 0 metadata"
ts=2022-08-19T16:17:56.238986574Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:20:57.435638654Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:21:57.736414303Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:25:59.013999956Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:26:39.258638291Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T16:27:49.305829575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T16:28:54.942064369Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T16:29:49.304132353Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T16:29:55.306893038Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T16:30:40.604157068Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T16:30:55.665528074Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:32:56.449769952Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:36:37.784301721Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T16:39:58.748645022Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:40:59.043921169Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:43:39.878004865Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=649 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 649 samples and 0 metadata"
ts=2022-08-19T16:43:49.360344162Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T16:43:54.939941786Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T16:45:55.659162034Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:46:40.946333467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T16:46:56.004960396Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:48:56.708924091Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:49:49.361133398Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T16:51:57.763040398Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:55:58.978087751Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T16:56:39.209499964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T16:56:49.303835945Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T16:57:39.510068814Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T16:58:59.865943428Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T17:03:36.518653889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T17:05:57.371138268Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:06:57.664790674Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:07:37.897084589Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T17:08:38.192292763Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T17:10:58.847200764Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:12:39.38205211Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T17:13:59.800876354Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T17:14:40.057562026Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T17:22:58.14117416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:23:38.369704776Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-19T17:28:55.122976751Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T17:30:49.362755398Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T17:32:36.438023964Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T17:32:49.360685547Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T17:34:57.330676003Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:43:55.222788385Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T17:45:40.870133819Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T17:45:55.930000087Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:46:36.214805348Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=638 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 638 samples and 0 metadata"
ts=2022-08-19T17:47:36.562769158Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 639 samples and 0 metadata"
ts=2022-08-19T17:48:49.304551439Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T17:49:57.403908074Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T17:56:49.303772236Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T17:57:39.715565636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=644 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 644 samples and 0 metadata"
ts=2022-08-19T17:57:59.785385371Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T18:01:41.159874437Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T18:02:56.586328768Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:04:49.360887801Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:04:57.325217978Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:05:37.560639296Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T18:05:57.677123952Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:07:58.273595368Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:09:58.872674959Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:11:39.405187824Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T18:13:55.079556058Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T18:16:56.188153812Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:24:38.779085527Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T18:28:39.989418695Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T18:28:55.049152962Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T18:29:49.39166336Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:29:49.650808489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:30:50.537030112Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:30:50.592442575Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:31:57.402058266Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:31:57.457385665Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:33:49.389480065Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:33:49.530390472Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:34:06.544942084Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=31 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 31 samples and 0 metadata"
ts=2022-08-19T18:34:06.602999085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-19T18:34:09.306049999Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-19T18:34:15.161138264Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=584 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 584 samples and 0 metadata"
ts=2022-08-19T18:34:15.251808671Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=576 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 576 samples and 0 metadata"
ts=2022-08-19T18:34:40.406481561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=323 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 323 samples and 0 metadata"
ts=2022-08-19T18:34:41.510035859Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=324 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 324 samples and 0 metadata"
ts=2022-08-19T18:34:57.456346151Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:34:59.361800626Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:35:49.304300796Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:36:49.390595829Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:36:49.647813949Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:37:51.51932335Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:38:49.372877593Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:39:49.481567634Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:39:49.606890066Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:40:53.440348882Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:40:55.662076682Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T18:42:26.970786646Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-19T18:42:29.390482907Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-19T18:42:37.450146681Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=546 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 546 samples and 0 metadata"
ts=2022-08-19T18:42:37.569882145Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=536 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 536 samples and 0 metadata"
ts=2022-08-19T18:42:55.024829876Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T18:43:59.309592459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:46:49.303836235Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:47:40.531282998Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T18:48:40.88113142Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T18:49:36.22998636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=640 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 640 samples and 0 metadata"
ts=2022-08-19T18:52:57.408949471Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T18:53:37.639253185Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T18:57:49.304194177Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T18:59:39.430773326Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=645 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 645 samples and 0 metadata"
ts=2022-08-19T19:00:39.762288087Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T19:04:56.259606904Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:06:36.847795288Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=642 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 642 samples and 0 metadata"
ts=2022-08-19T19:08:37.597636069Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T19:09:37.89283961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T19:10:38.184042839Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T19:11:58.551091323Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:13:59.139011463Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:15:59.74887853Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=963 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 963 samples and 0 metadata"
ts=2022-08-19T19:17:55.386942012Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:20:56.445969369Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:26:49.361446656Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T19:27:38.797529377Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T19:29:39.387920457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=647 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 647 samples and 0 metadata"
ts=2022-08-19T19:29:59.450140877Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:31:39.978203579Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
ts=2022-08-19T19:31:55.039574477Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T19:33:55.746596246Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:34:49.303802672Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T19:34:56.094511894Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:42:58.781856749Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:43:59.073683811Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:44:49.360651079Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=443 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 443 metadata"
ts=2022-08-19T19:47:55.307955742Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T19:50:36.290396308Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=639 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 639 samples and 0 metadata"
ts=2022-08-19T19:50:56.407545127Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:51:56.776765047Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:53:37.459952339Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T19:53:57.57460193Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:54:57.867043207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T19:57:38.722624906Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T19:57:58.782235773Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:03:55.684849588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:05:49.304198217Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T20:09:49.308318212Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T20:16:54.934634955Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T20:21:56.76620201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:22:57.115614594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:28:58.972650959Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:29:59.26414558Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:32:55.161950034Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=932 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 932 samples and 0 metadata"
ts=2022-08-19T20:35:49.304337701Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T20:35:56.305676108Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:37:49.303875784Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T20:45:39.416768234Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=646 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 646 samples and 0 metadata"
ts=2022-08-19T20:49:40.706082285Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=648 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 648 samples and 0 metadata"
ts=2022-08-19T20:50:36.055966585Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=641 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 641 samples and 0 metadata"
ts=2022-08-19T20:51:56.54526747Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=940 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 940 samples and 0 metadata"
ts=2022-08-19T20:52:36.808119954Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=652 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 652 samples and 0 metadata"
[ OK ]
ts=2022-08-19T20:52:56Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-19T20:54:37.63865414Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-19T20:56:58.303041163Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T20:58:56.828143913Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T21:01:56.82096116Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T21:04:56.873238136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-19T21:05:36.054878851Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-19T21:09:57.669859403Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T21:11:58.302758176Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T21:22:56.966673386Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
[ OK ]
ts=2022-08-19T21:23:22Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-19T21:29:22.736016279Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T21:31:40.367175889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-19T21:32:55.77745096Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T21:35:56.832066672Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T21:50:22.740684532Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T21:52:22.739726807Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T22:01:40.170607753Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-19T22:11:58.674450547Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T22:21:36.822950213Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-19T22:33:40.712870638Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-19T22:35:22.740007701Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T22:40:58.124544719Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T22:51:36.543049492Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-19T23:00:59.537655096Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T23:04:55.839123163Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T23:10:37.890022272Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-19T23:12:38.507401646Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=674 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 674 samples and 0 metadata"
ts=2022-08-19T23:14:39.105162909Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-19T23:26:38.063403642Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-19T23:26:58.124492625Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T23:30:39.235909822Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-19T23:31:39.526423986Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-19T23:37:36.58678277Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-19T23:38:22.740289315Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-19T23:40:57.711186448Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T23:41:58.002031635Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-19T23:58:22.740520167Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T00:00:59.442515561Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T00:16:59.636039175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T00:33:40.049404718Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-20T00:42:58.237899033Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T00:43:58.537182988Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T00:44:38.764572809Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-20T00:52:56.593233021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T01:13:22.797621451Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-20T01:14:38.823974015Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-20T01:32:59.643941401Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=743 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 743 samples and 0 metadata"
ts=2022-08-20T01:33:54.933816583Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=712 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 712 samples and 0 metadata"
ts=2022-08-20T01:35:55.645337383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=720 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 720 samples and 0 metadata"
ts=2022-08-20T01:39:22.740296768Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T01:43:38.285115929Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-20T01:45:38.870948422Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-20T01:49:40.049782559Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-20T01:54:22.79624482Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-20T01:55:37.11155074Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-20T02:07:36.671071887Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
[ OK ]
ts=2022-08-20T03:08:30Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
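Editor's note: the ingestion-rate-limit errors above continue straight across this agent restart, so restarting the agent is not clearing the condition; the same remote_write target keeps answering HTTP 429. To get a sense of how much data is actually being rejected, a rough sketch like the one below can total the count= fields from lines shaped like these. Only the logfmt keys visible in this file (ts=, msg=, count=, err=) are taken from the log; the script itself, its default path argument, and its report layout are illustrative assumptions.

#!/usr/bin/env python3
# Rough sketch (not part of the original log): totals the samples and
# metadata entries that the remote_write endpoint rejected with HTTP 429,
# bucketed per hour, by parsing lines shaped like the ones in this file.
# Only the logfmt keys visible above (ts=, msg=, count=, err=) are relied
# on; the default path and the report layout are illustrative assumptions.
import re
import sys
from collections import defaultdict

LINE = re.compile(
    r'ts=(?P<ts>\S+).*?msg="(?P<msg>[^"]+)".*?count=(?P<count>\d+).*?'
    r'err="server returned HTTP status 429'
)

def main(path="/var/log/grafana-agent.log"):
    dropped = defaultdict(lambda: {"samples": 0, "metadata": 0})
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            m = LINE.search(line)
            if not m:
                continue
            hour = m.group("ts")[:13]  # e.g. 2022-08-19T19
            kind = "metadata" if "metadata" in m.group("msg") else "samples"
            dropped[hour][kind] += int(m.group("count"))
    for hour in sorted(dropped):
        d = dropped[hour]
        print(f"{hour}:00Z  rejected samples={d['samples']:>6}  metadata={d['metadata']:>5}")

if __name__ == "__main__":
    main(*sys.argv[1:2])

Run against this file, hours with large rejected-sample totals should line up with the dense stretches of repeated errors above.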
ts=2022-08-20T03:18:39.490104928Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-20T04:49:40.065207548Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=675 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 675 samples and 0 metadata"
ts=2022-08-20T05:12:37.906458152Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-20T05:35:31.098401971Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T05:50:55.288516902Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-20T06:25:37.392203993Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-20T06:42:57.763367146Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T06:44:38.259232229Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-20T06:48:31.092305182Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T08:29:36.191572325Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-20T09:14:31.114285097Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-20T09:55:40.689829033Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-20T09:56:40.931829235Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-20T10:02:58.027032865Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T10:10:39.640731696Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-20T10:41:56.878790815Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T10:54:39.616498315Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-20T11:14:39.209023967Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-20T11:18:40.079815467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-20T11:26:31.07409336Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T11:45:36.276932045Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-20T11:46:36.524692457Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-20T11:49:37.255175979Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-20T11:58:31.074057685Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T12:23:31.074650342Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T12:31:31.115384677Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-20T13:03:31.07449723Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T13:11:55.971106394Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T13:14:36.686876514Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-20T14:14:31.075842843Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T14:28:31.075232741Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T14:35:40.23010534Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-20T15:15:31.074553876Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T15:25:31.074668136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T15:50:31.074290541Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T16:26:36.468137011Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-20T16:37:31.075397646Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T16:47:36.325696167Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-20T16:51:37.357085349Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-20T17:01:39.497047301Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-20T17:23:31.074886306Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T17:25:39.961262243Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=673 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 673 samples and 0 metadata"
ts=2022-08-20T17:35:37.472387769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-20T18:02:36.291956468Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-20T18:49:39.313623122Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-20T18:57:36.605236872Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-20T19:58:31.083356058Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T19:59:38.827063728Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-20T20:01:31.130883612Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-20T20:10:57.011904289Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T20:12:37.501432083Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-20T20:58:40.762522972Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-20T21:12:31.083794411Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T21:17:56.206525262Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T21:32:31.083605168Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T22:21:55.670828152Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-20T23:09:31.093236489Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T23:28:31.092818263Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-20T23:48:38.862104347Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T00:01:39.703056781Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T00:06:31.15036624Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T00:11:58.168219785Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T00:22:56.749600678Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T00:45:39.106489736Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T00:50:55.737782008Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T01:38:36.442558211Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T01:52:31.159483679Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T01:54:36.677785211Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T02:13:58.284234238Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T02:18:59.762912675Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T02:20:55.412768797Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T02:30:38.647545463Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=675 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 675 samples and 0 metadata"
ts=2022-08-21T02:54:36.435876128Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T03:08:56.039800175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T03:10:36.674324512Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-21T03:29:37.91616847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T03:42:37.079805417Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T04:03:39.28306057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=673 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 673 samples and 0 metadata"
ts=2022-08-21T04:08:40.927173687Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=673 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 673 samples and 0 metadata"
ts=2022-08-21T04:26:36.939447999Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-21T04:50:59.639953549Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T05:21:39.748413218Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-21T05:44:31.093549416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T06:07:39.844798391Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T06:27:36.458160317Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-21T06:40:31.092295645Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T07:13:31.149647167Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T07:59:36.485082269Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T08:08:55.206773059Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T08:12:36.560155516Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T08:12:56.675320357Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T08:14:31.149327291Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T08:25:31.093028174Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T08:44:31.092975806Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T09:28:31.102707569Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T09:33:31.092842166Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T10:21:39.071824507Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T11:09:39.660605832Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=672 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 672 samples and 0 metadata"
ts=2022-08-21T11:20:58.432818395Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T11:34:31.093376335Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T11:53:31.149792358Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T11:59:40.967092482Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T12:44:36.386059592Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T13:37:31.148622797Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T14:12:40.937136504Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T14:29:36.439111675Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T14:32:37.489937873Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T14:52:38.902739644Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T15:06:31.103253234Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T15:09:39.400493129Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T15:10:39.693082439Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T15:27:40.153681975Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T15:50:31.092928536Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T16:37:38.078838495Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T16:54:58.569543856Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T17:13:39.466579211Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T17:27:58.956361443Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T17:47:55.253933243Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T17:51:36.606571189Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T17:53:31.092205872Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T17:59:39.088756813Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T18:08:37.653857182Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T18:10:38.230257658Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T18:25:58.069648562Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T18:59:38.791791879Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T19:15:38.853641414Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T19:55:36.570757847Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-21T20:01:38.549568331Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T20:10:36.361606946Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T20:27:36.895281142Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=660 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 660 samples and 0 metadata"
ts=2022-08-21T20:28:31.091613499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:29:31.148590916Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T20:31:38.185916883Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T20:32:38.472469302Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T20:33:58.817084275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T20:37:39.913263966Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T20:38:31.093022037Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:38:40.255390466Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T20:42:31.095203527Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:43:57.106730817Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T20:44:57.449104867Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T20:48:38.548261729Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (50000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T20:50:31.097647136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:50:31.152300907Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T20:50:39.12687752Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T20:50:59.184547321Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T20:51:39.409945231Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T20:51:59.468303002Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T20:52:31.091801121Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:52:39.693765196Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T20:53:31.091863375Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:53:31.146632849Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T20:53:39.977042726Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T20:53:55.034206265Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T20:54:31.092038889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:54:40.315584355Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T20:55:31.146175373Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T20:55:40.658491565Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T20:56:31.091668985Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:56:31.146280029Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T20:58:31.091531727Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T20:58:31.146371578Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:01:31.092051274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:01:37.742843288Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:02:31.095564884Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:02:31.151793483Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:03:38.329744024Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:04:38.615357663Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (25000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:05:31.090937482Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:07:39.485278756Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:09:31.091929341Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:09:40.041857812Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:12:36.070381831Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T21:13:36.412524958Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T21:14:31.091873923Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:14:31.146474933Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:14:36.75191179Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (35714.28571428572) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:14:56.869596319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:15:31.090941094Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:15:37.109599085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:15:57.225953894Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (31250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:16:31.091187524Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:16:31.14585406Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:16:37.450878729Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T21:16:57.563321436Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:17:31.092734622Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:17:31.147383482Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:17:37.788295043Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:17:57.846150961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:18:31.093105632Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:18:31.148082782Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:18:38.106273354Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:18:58.163151201Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:19:38.389848873Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:19:58.447425853Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:20:31.091347636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:20:31.145872978Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:20:38.672742809Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:20:58.732321374Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:21:38.958076285Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:21:59.017651968Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:22:31.092288729Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:22:31.146809587Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:22:39.248382946Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:22:59.31011758Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:23:31.094308071Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:23:31.148856428Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:23:39.535972247Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T21:23:59.593951363Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:24:39.819960244Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:24:54.877207274Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T21:25:31.090890971Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:25:31.14553455Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:25:40.158452892Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T21:25:55.216985947Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T21:26:31.091418897Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:26:31.145936695Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:26:40.498338251Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:26:55.556593053Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:27:40.838786042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:27:55.896696426Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:28:31.092159426Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:28:31.203056144Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:28:36.203592721Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-21T21:28:56.315167261Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:29:36.541283636Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:29:56.654941515Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:30:31.092530719Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:30:31.147412021Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:30:36.8822719Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T21:30:56.997000203Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:31:37.222763492Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T21:31:57.335613315Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:32:31.092163448Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:32:31.146791475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:32:37.560990038Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:32:57.673340687Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:33:37.903165448Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:33:57.961075057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:34:31.091860422Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:34:31.146369397Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:34:38.18740662Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:34:58.246539552Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:35:18.358944458Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=7 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 7 samples and 0 metadata"
ts=2022-08-21T21:35:31.094518899Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:35:31.157924499Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:35:38.472304356Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:35:58.529780415Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:36:38.766482673Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:36:58.825944102Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:37:03.881482275Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=17 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 17 samples and 0 metadata"
ts=2022-08-21T21:37:31.093618678Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:37:31.148245835Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:37:39.052524633Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:37:59.110919522Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:38:39.340570143Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:38:59.398450027Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:39:31.092523535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:39:31.147027785Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:39:39.623798249Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:39:59.682505544Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T21:40:31.092774037Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:40:31.147429208Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:40:39.908618879Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:40:54.96626052Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T21:41:40.247638107Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T21:41:55.304810476Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:42:31.091942936Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:42:31.146374615Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:42:40.585189923Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:42:55.643770167Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:43:31.149777304Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:43:40.924870441Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T21:43:55.982106327Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:44:31.09159126Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:44:36.265386338Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:44:56.377511131Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:45:31.145553444Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:45:36.625240179Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:45:56.738679619Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:46:01.793756011Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=17 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 17 samples and 0 metadata"
ts=2022-08-21T21:46:31.091442139Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:46:31.146014953Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:46:36.963615881Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T21:46:57.075866942Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:47:31.09235459Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:47:31.146829143Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:47:37.30166899Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-21T21:47:57.422758197Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:48:31.092754787Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:48:31.147414611Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:48:32.589987079Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=10 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 10 samples and 0 metadata"
ts=2022-08-21T21:48:37.646971507Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:48:57.705327809Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:49:37.929245623Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:49:57.987849684Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:50:18.100447525Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=7 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 7 samples and 0 metadata"
ts=2022-08-21T21:50:31.092629827Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:50:31.147098604Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:50:38.214652363Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:50:58.272529318Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:51:31.093138285Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:51:31.147787149Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:51:38.499289624Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:51:58.556789344Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:52:38.782767053Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T21:52:58.846167474Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:53:31.091240853Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:53:31.145903159Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:53:39.071091034Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:53:59.128776961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:54:31.092239005Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:54:31.146988597Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:54:39.355669535Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:54:59.414192622Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:55:39.6380559Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:55:59.695451438Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T21:56:31.091296957Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:56:31.145883719Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:56:39.919063235Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=673 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 673 samples and 0 metadata"
ts=2022-08-21T21:56:54.976515163Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T21:57:40.256929791Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:57:55.313968937Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:58:20.479994428Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=7 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 7 samples and 0 metadata"
ts=2022-08-21T21:58:31.090845816Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:58:31.14545022Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:58:40.592237868Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T21:58:55.648900898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T21:59:30.875137031Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=10 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 10 samples and 0 metadata"
ts=2022-08-21T21:59:31.092194308Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T21:59:31.146757127Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T21:59:40.932799507Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T21:59:55.990548896Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:00:31.092577767Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:00:31.147126817Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:00:36.276342205Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T22:00:56.395918211Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:01:31.097661766Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:01:31.155004514Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:01:36.627047786Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T22:01:56.740011769Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:02:31.110743601Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:02:31.165567855Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:02:36.986977351Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T22:02:57.100406251Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:03:37.329789733Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-21T22:03:57.443452197Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:04:31.091280485Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:04:31.14589484Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:04:37.668416383Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T22:04:57.725913511Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:05:31.091291648Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:05:31.145920067Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:05:37.955860596Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-21T22:05:58.013467577Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:06:38.240699947Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:06:58.298069145Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:07:31.091367064Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:07:31.146049808Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:07:38.522263838Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:07:58.581456542Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:08:31.093329863Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:08:31.148032406Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:08:38.806919085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:08:58.864221348Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:09:39.089866872Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:09:59.147029886Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:10:31.092054912Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:10:31.146580434Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:10:39.374991716Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:10:59.438867943Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:11:39.666349999Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T22:11:59.726242308Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T22:12:31.091992137Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:12:31.146969929Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:12:39.95154431Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-21T22:12:55.009200178Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-21T22:13:40.289577212Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:13:55.347015057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:14:31.092321176Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:14:31.14696871Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:14:40.634206712Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:14:55.691396175Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:15:31.092443767Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:15:31.147205903Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:15:40.97291255Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:15:56.029995277Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:16:31.093198547Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:16:31.147714085Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:16:36.309763654Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T22:16:56.422701973Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:17:31.097870095Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:17:36.64985939Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T22:17:56.763432782Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:18:31.148995824Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:18:36.997318422Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T22:18:57.110308641Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:19:31.095651226Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:19:31.15028051Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:19:37.336165124Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-21T22:19:57.44781467Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:20:02.504541775Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=17 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 17 samples and 0 metadata"
ts=2022-08-21T22:20:37.678432132Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:20:57.735280024Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T22:21:02.790693208Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=17 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 17 samples and 0 metadata"
ts=2022-08-21T22:21:31.091805843Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T22:21:31.146257595Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T22:21:37.96128116Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (1250) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T22:31:36.038488279Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-21T23:10:31.090893921Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-21T23:10:39.711699927Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-21T23:10:59.769978475Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-21T23:11:31.147548462Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T23:12:40.343795375Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-21T23:13:31.139229889Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T23:16:31.148080832Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-21T23:18:37.3903448Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-21T23:54:58.8083339Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-21T23:55:39.033912616Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-21T23:58:54.957585961Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-22T00:56:31.090681791Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T00:56:31.149529573Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T00:57:31.176178594Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T00:57:31.3162136Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T00:59:31.091846426Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (71428.57142857143) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T00:59:31.347612123Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (71428.57142857143) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:00:31.177212311Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (71428.57142857143) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T01:01:31.233291564Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (71428.57142857143) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:02:31.091753837Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T01:02:31.146508942Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:04:31.177174333Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T01:04:31.23195839Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:05:54.407635154Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:06:31.091434858Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T01:07:31.094955032Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T01:07:31.525200595Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 407 metadata"
ts=2022-08-22T01:07:49.444225562Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=1000 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 1000 samples and 0 metadata"
ts=2022-08-22T01:07:51.093060525Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=999 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 999 samples and 0 metadata"
ts=2022-08-22T01:07:59.99299803Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=224 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 224 samples and 0 metadata"
ts=2022-08-22T01:19:59.27966628Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=478 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 478 samples and 0 metadata"
ts=2022-08-22T01:40:39.785951905Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T01:40:54.826941617Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-22T01:41:55.091857973Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-22T02:09:31.078277109Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T02:17:38.767747054Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T02:21:39.619512357Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T02:21:59.664661042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=490 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 490 samples and 0 metadata"
ts=2022-08-22T02:48:31.07646057Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T02:55:38.006942147Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-22T03:21:39.239497661Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T03:23:31.076493554Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T03:23:39.662586288Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=673 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 673 samples and 0 metadata"
ts=2022-08-22T03:25:55.170627091Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=470 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 470 samples and 0 metadata"
ts=2022-08-22T03:31:36.66917404Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-22T03:37:31.075651077Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T03:37:38.300415172Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (62500) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-22T12:04:31.076866403Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T12:04:40.918000385Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T12:05:36.205405221Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-22T12:27:31.076357151Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T12:27:31.11618553Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=407 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (100000) exceeded while adding 0 samples and 407 metadata"
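The 429 responses above show the Grafana Cloud endpoint rejecting whole write batches because the stack's ingestion rate limit was exceeded; the limit visible in the errors is 62500 early on 22 Aug, then 100000, then 125000, so it was raised twice without the errors stopping. When raising the limit further is not an option, the usual agent-side mitigation is to drop series that are not needed before they are queued, via write_relabel_configs on the remote_write block in /etc/grafana-agent.yaml. A minimal sketch follows, assuming Grafana Agent static mode; the config name and the metric pattern are illustrative assumptions, not taken from this host's file.

metrics:                # top-level key is `prometheus` on older agent versions
  configs:
    - name: integrations
      remote_write:
        - url: https://metrics.grafanacloud.newfold.com/api/prom/push
          # Drop series before they are sent. The regex is only an example of a
          # high-volume mysqld_exporter family; choose it from the stack's
          # cardinality/usage views rather than copying it verbatim.
          write_relabel_configs:
            - source_labels: [__name__]
              regex: "mysql_info_schema_.*"
              action: drop

write_relabel_configs follows standard Prometheus remote_write semantics, so the same block can instead carry keep rules if an allow-list is easier to maintain.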
[ OK ]
ts=2022-08-22T14:22:49Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-22T18:14:49.225965608Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T18:16:40.212353272Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=694 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 694 samples and 0 metadata"
ts=2022-08-22T18:17:40.573774722Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=695 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 695 samples and 0 metadata"
ts=2022-08-22T18:18:49.227926137Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T18:19:49.248776932Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T18:23:49.224916097Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T18:24:37.962728297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=671 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 671 samples and 0 metadata"
ts=2022-08-22T18:26:38.533980127Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-22T19:01:29.131554329Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T19:13:31.604194728Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T20:42:49.22112323Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-22T20:44:29.130851867Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T20:45:29.130363416Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T20:46:29.130156145Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T20:47:29.130768824Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-22T21:03:29.14962665Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="Error 1040: Too many connections"
ts=2022-08-23T15:02:49.232821993Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T15:03:37.098195179Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-23T15:09:59.265625319Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T15:11:49.23111746Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T15:13:55.620756804Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T15:14:40.895940241Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-23T15:14:49.227334148Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T15:14:49.280620576Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-23T15:14:55.954001388Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T15:16:36.636570332Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-23T15:17:57.097994359Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T18:35:38.106150115Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-23T18:35:49.226473291Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:35:58.167653839Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T18:37:39.07891941Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-23T18:37:59.14340915Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T18:38:39.37479866Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-23T18:38:59.430971588Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=560 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 560 samples and 0 metadata"
ts=2022-08-23T18:39:59.754813216Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=572 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 572 samples and 0 metadata"
ts=2022-08-23T18:40:39.975484721Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-23T18:40:49.282103811Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-23T18:42:40.684800848Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=670 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 670 samples and 0 metadata"
ts=2022-08-23T18:42:49.281883055Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
[ OK ]
ts=2022-08-23T18:42:51Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-23T18:43:57.142503511Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-23T18:44:37.382946539Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-23T18:44:51.34302196Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:47:58.407215991Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-23T18:48:38.636289625Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-23T18:49:51.345930796Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:50:59.348435042Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-23T18:51:51.346927738Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:52:39.855429728Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=669 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 669 samples and 0 metadata"
ts=2022-08-23T18:52:51.403627272Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-23T18:53:40.190023469Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-23T18:53:51.345124624Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:53:55.246083152Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=550 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 550 samples and 0 metadata"
ts=2022-08-23T18:54:40.519511501Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=667 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 667 samples and 0 metadata"
ts=2022-08-23T18:54:55.574001696Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-23T18:55:51.343213706Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:55:51.395042008Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-23T18:55:55.902789894Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-23T18:56:36.315991784Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-23T18:57:36.711411311Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-23T18:57:51.349464846Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:59:51.34524976Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-23T18:59:57.48961653Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
[ OK ]
2022-08-23 16:42:52.212855 I | error loading config file /etc/grafana-agent.yaml: unable to substitute config with environment variables: unable to parse variable name
[ OK ]
2022-08-23 18:10:33.481623 I | error loading config file /etc/grafana-agent.yaml: unable to substitute config with environment variables: unable to parse variable name
[ OK ]
2022-08-23 19:42:52.246489 I | error loading config file /etc/grafana-agent.yaml: unable to substitute config with environment variables: unable to parse variable name
[ OK ]
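The three failed reloads above ("unable to substitute config with environment variables: unable to parse variable name") are what the agent reports when it runs with environment-variable expansion enabled (-config.expand-env) and the YAML contains a bare $ that does not start a valid ${VAR} reference, typically a literal $ inside a credential; with expansion on, a literal $ has to be written as $$. The excerpt below is a hypothetical illustration of that pattern, not the actual offending line, which the log does not show, and the variable names are placeholders.

# Assumes the agent is started with -config.expand-env.
integrations:
  mysqld_exporter:
    enabled: true
    # the real password contains "$"; written as "$$" so expansion yields "pa$sw0rd"
    data_source_name: "monitor:pa$$sw0rd@(127.0.0.1:3306)/"
  prometheus_remote_write:
    - url: https://metrics.grafanacloud.newfold.com/api/prom/push
      basic_auth:
        username: ${GRAFANA_CLOUD_USER}      # placeholder variable names
        password: ${GRAFANA_CLOUD_API_KEY}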
ts=2022-08-24T02:31:24Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-24T13:10:57.873327822Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-24T14:32:25.278241781Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-24T14:33:37.713843109Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=660 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 660 samples and 0 metadata"
ts=2022-08-24T14:35:25.283344983Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-24T14:35:38.346110435Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=668 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 668 samples and 0 metadata"
ts=2022-08-24T22:34:37.776868297Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-25T01:05:40.839470904Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=660 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 660 samples and 0 metadata"
ts=2022-08-25T02:11:58.573797263Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T03:23:57.983112866Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T03:24:25.276384617Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-25T03:25:38.566652852Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=659 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 659 samples and 0 metadata"
ts=2022-08-25T03:26:25.279664514Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-25T03:26:38.879540701Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-25T03:29:39.835653565Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=663 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 663 samples and 0 metadata"
ts=2022-08-25T04:16:25.273515854Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-25T04:17:36.882111207Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=658 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 658 samples and 0 metadata"
ts=2022-08-25T04:17:57.012032501Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T04:18:37.273043969Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=659 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 659 samples and 0 metadata"
ts=2022-08-25T04:19:57.725021665Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T04:20:25.272993563Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=500 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 500 metadata"
ts=2022-08-25T04:20:25.33245471Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-25T04:20:37.982616282Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=666 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 666 samples and 0 metadata"
ts=2022-08-25T04:20:58.047973563Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T04:21:58.360529139Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T04:22:38.609564898Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=665 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 665 samples and 0 metadata"
ts=2022-08-25T04:23:58.987138416Z caller=dedupe.go:112 agent=prometheus instance=4348e35bd74bc15bb464b7c1cb114b95 component=remote level=error remote_name=4348e3-7b4cd8 url=https://metrics.grafanacloud.newfold.com/api/prom/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
[ OK ]
ts=2022-08-25T18:43:23Z level=info caller=traces/traces.go:143 msg="Traces Logger Initialized" component=traces
ts=2022-08-25T20:18:58.220704521Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T20:41:59.748749716Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=570 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 570 samples and 0 metadata"
ts=2022-08-25T20:48:56.790281557Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:17:37.289357509Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=656 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 656 samples and 0 metadata"
ts=2022-08-25T23:18:57.667466888Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:20:58.294982724Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:26:39.980936265Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-25T23:27:55.382154634Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=550 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 550 samples and 0 metadata"
ts=2022-08-25T23:28:40.791972688Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=659 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 659 samples and 0 metadata"
ts=2022-08-25T23:28:55.851830482Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:30:56.57142891Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:31:56.884072025Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-25T23:33:37.428436136Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=656 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 656 samples and 0 metadata"
ts=2022-08-25T23:35:38.000555215Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=659 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 659 samples and 0 metadata"
ts=2022-08-26T00:10:59.524646186Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:11:23.998979598Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error while sending metadata" count=409 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 0 samples and 409 metadata"
ts=2022-08-26T00:11:39.742370934Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-26T00:11:59.795818128Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=570 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 570 samples and 0 metadata"
ts=2022-08-26T00:12:39.999855355Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-26T00:12:55.049735001Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=550 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 550 samples and 0 metadata"
ts=2022-08-26T00:13:40.303569188Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=662 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 662 samples and 0 metadata"
ts=2022-08-26T00:13:55.358799244Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:14:55.667174724Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:15:40.912960233Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=664 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 664 samples and 0 metadata"
ts=2022-08-26T00:15:55.963837642Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:16:56.327629552Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:17:56.659635309Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:18:36.872801112Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=658 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 658 samples and 0 metadata"
ts=2022-08-26T00:18:56.97341669Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:19:57.30918988Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:21:37.832843099Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=661 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 661 samples and 0 metadata"
ts=2022-08-26T00:21:57.882695256Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:22:58.140160225Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T00:50:56.03298283Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T03:16:59.287801351Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T03:17:59.525958859Z caller=dedupe.go:112 agent=prometheus instance=d6e73dd1521ce78019fcd4cfc72f0a5d component=remote level=error remote_name=d6e73d-2dc2bd url=https://metrics.grafanacloud.newfold.com/api/v1/push msg="non-recoverable error" count=558 exemplarCount=0 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (125000) exceeded while adding 558 samples and 0 metadata"
ts=2022-08-26T03:37:59.58460714Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="error scraping apache: Get \"http://localhost/whm-server-status/?auto\": dial tcp 127.0.0.1:80: connect: connection refused"
ts=2022-08-26T03:38:29.130424193Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="dial tcp 127.0.0.1:3306: connect: connection refused"
ts=2022-08-26T03:38:59.557301931Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="error scraping apache: Get \"http://localhost/whm-server-status/?auto\": dial tcp 127.0.0.1:80: connect: connection refused"
ts=2022-08-26T03:39:29.129965097Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="dial tcp 127.0.0.1:3306: connect: connection refused"
ts=2022-08-26T03:39:59.558010639Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="error scraping apache: Get \"http://localhost/whm-server-status/?auto\": dial tcp 127.0.0.1:80: connect: connection refused"
ts=2022-08-26T03:40:29.13088394Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="dial tcp 127.0.0.1:3306: connect: connection refused"
ts=2022-08-26T03:40:59.557326993Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="error scraping apache: Get \"http://localhost/whm-server-status/?auto\": dial tcp 127.0.0.1:80: connect: connection refused"
ts=2022-08-26T03:41:29.130625134Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="dial tcp 127.0.0.1:3306: connect: connection refused"
ts=2022-08-26T03:41:59.55782006Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="error scraping apache: Get \"http://localhost/whm-server-status/?auto\": dial tcp 127.0.0.1:80: connect: connection refused"
ts=2022-08-26T03:42:29.130575694Z caller=exporter.go:155 level=error integration=mysqld_exporter msg="Error pinging mysqld" err="dial tcp 127.0.0.1:3306: connect: connection refused"
ts=2022-08-26T03:42:59.558980587Z caller=collector.go:568 level=error integration=apache_http msg="Error scraping apache:" err="status 404 Not Found (404): <!doctype html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <meta http-equiv=\"x-ua-compatible\" content=\"ie=edge\">\n <title>404 Error</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <meta name=\"robots\" content=\"noindex\">\n <style>\n @media screen and (max-width:500px) {\n body { font-size: .6em; } \n }\n </style>\n</head>\n\n<body style=\"text-align: center;\">\n\n <h1 style=\"font-family: Georgia, serif; color: #4a4a4a; margin-top: 4em; line-height: 1.8;\">\n We’re sorry.<br>This page is temporarily offline for server maintenance.<br>It will be back online soon.\n </h1>\n\n <h2 style=\" font-family: Verdana, sans-serif; color: #7d7d7d; font-weight: 300;\">\n 404 Error. Not Found.\n </h2>\n\n</body>\n\n</html>\n <hr>\n\n <address><a href=\"http://www.acme.com/software/thttpd/\">thttpd</a></address>\n\n </body>\n\n</html>\n"
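The final block records both scrape targets going away at once: mysqld refuses connections on 127.0.0.1:3306, and the Apache status URL first refuses connections and then answers with a "temporarily offline for server maintenance" 404 page whose footer identifies thttpd, so a placeholder server was answering on port 80 rather than mod_status. For reference, a sketch of the integration settings these scrapes imply; the field names (scrape_uri, data_source_name) are what I would expect from the agent's apache_http and mysqld_exporter integrations and should be verified against the installed agent version, and the DSN details are placeholders reconstructed from the error messages, not the host's real config.

integrations:
  apache_http:
    enabled: true
    # matches the URL in the scrape errors above; needs mod_status reachable
    # at this path once Apache is back up
    scrape_uri: "http://localhost/whm-server-status/?auto"
  mysqld_exporter:
    enabled: true
    # placeholder DSN; the exporter pings this endpoint, hence the
    # "dial tcp 127.0.0.1:3306: connect: connection refused" entries
    data_source_name: "monitor:<password>@(127.0.0.1:3306)/"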