Add query exporter

parent 5a24b6eff6
commit c413ab41c2
@ -6,6 +6,10 @@ services:
    volumes:
      - "${DATA_DIR:?specify data dir in .env file}/postgres:/var/lib/postgresql/data:rw"
    restart: always
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    ports:
+      - "0.0.0.0:15432:5432"
    environment:
      POSTGRES_DB: bluesky
      POSTGRES_PASSWORD: "${POSTGRES_PASSWORD:?specify password in .env file}"
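These additions publish Postgres on host port 15432 and map `host.docker.internal` to the host gateway. A quick way to confirm the published port answers from the host (a minimal sketch, assuming the Postgres client tools are installed on the host and the password is the one set in `.env`):

```
# Check that the published port is accepting connections.
pg_isready -h 127.0.0.1 -p 15432 -d bluesky -U postgres

# Optionally open a session; load the password however you normally read your .env file.
PGPASSWORD="$POSTGRES_PASSWORD" psql -h 127.0.0.1 -p 15432 -U postgres -d bluesky -c 'select 1;'
```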
@ -29,7 +33,7 @@ services:
      ATP_PLC_ADDR: "${PLC_ADDRESS:-https://plc.directory}"
    ports:
      - "0.0.0.0:11001:8080"
-    command: ["--log-level=0"]
+    command: [ "--log-level=0" ]
  consumer:
    build:
      context: .
@ -50,7 +54,7 @@ services:
      ATP_PLC_ADDR: "${PLC_ADDRESS:-https://plc.directory}"
    ports:
      - "0.0.0.0:11002:8080"
-    command: ["--log-level=0"]
+    command: [ "--log-level=0" ]

  record-indexer:
    build:
@ -76,4 +80,4 @@ services:
      ATP_PLC_ADDR: "${PLC_ADDRESS:-https://plc.directory}"
    ports:
      - "0.0.0.0:11003:8080"
-    command: ["--log-level=0"]
+    command: [ "--log-level=0" ]
@ -1,9 +1,23 @@
+# Graceful shutdown/restart
+
+`docker compose stop lister`
+`docker compose stop consumer`
+`docker compose stop record-indexer`
+
+Take a look at Grafana; once everything has gone quiet, stop the database:
+
+`docker compose stop postgres`
+
+Start everything back up:
+
+`docker compose up -d --build`
+
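The same sequence as one small script (a sketch only; it strings together the commands above and assumes it runs from the compose project directory):

```
#!/bin/sh
set -e
# Stop the writers first, then the database, then rebuild and restart everything.
docker compose stop lister consumer record-indexer
# ...check Grafana and wait for activity to die down before continuing...
docker compose stop postgres
docker compose up -d --build
```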
# Control number of workers

Full throttle
`curl 'localhost:11003/pool/resize?size=50'`

-Half throttle
+Half throttle (recommended)
`curl 'localhost:11003/pool/resize?size=25'`

Stop eating all of my Internet
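A tiny helper for switching between these settings (a sketch that only wraps the resize endpoint shown above; the port belongs to the record-indexer, so adjust it if you want to throttle a different instance):

```
resize_pool() {
  # Print the HTTP status so you can see the resize call was accepted.
  curl -s -o /dev/null -w '%{http_code}\n' "localhost:11003/pool/resize?size=$1"
}

resize_pool 25   # half throttle (recommended)
```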
@ -6,87 +6,12 @@
### Note: remember to allow the ports below so that Prometheus can reach host.docker.internal:xxxx from inside its container

+Lister, consumer, indexer:
`sudo ufw allow 11001`
`sudo ufw allow 11002`
`sudo ufw allow 11003`

-# Install Node-exporter
+Postgres:
+`sudo ufw allow 15432`
+
-You'll need to install node exporter for monitoring
+# Go to `metrics/prometheus/exporters` and install the node and query exporters

[… the node exporter installation instructions that previously followed here were removed from this file; the same content reappears verbatim in the new exporters README added later in this commit …]
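Taken together, the firewall openings used across this commit can be applied and checked in one go (a sketch; include only the ports you actually expose):

```
# Indexer instances, Postgres, Prometheus, node exporter, query exporter.
for port in 11001 11002 11003 15432 9090 9100 9560; do
  sudo ufw allow "$port"
done
sudo ufw status
```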
@ -176,6 +176,140 @@
"title": "Ram Usage",
"type": "gauge"
},
+{
+"datasource": {
+"type": "prometheus",
+"uid": "PBFA97CFB590B2093"
+},
+"fieldConfig": {
+"defaults": {
+"color": {
+"mode": "thresholds"
+},
+"mappings": [],
+"min": 0,
+"thresholds": {
+"mode": "absolute",
+"steps": [
+{
+"color": "green",
+"value": null
+}
+]
+},
+"unit": "none",
+"unitScale": true
+},
+"overrides": []
+},
+"gridPos": {
+"h": 4,
+"w": 4,
+"x": 6,
+"y": 0
+},
+"id": 12,
+"options": {
+"colorMode": "value",
+"graphMode": "area",
+"justifyMode": "auto",
+"orientation": "auto",
+"reduceOptions": {
+"calcs": [
+"lastNotNull"
+],
+"fields": "",
+"values": false
+},
+"showPercentChange": false,
+"textMode": "auto",
+"wideLayout": true
+},
+"pluginVersion": "10.3.3",
+"targets": [
+{
+"datasource": {
+"type": "prometheus",
+"uid": "PBFA97CFB590B2093"
+},
+"editorMode": "code",
+"expr": "repos_seen{job=\"db\"}",
+"instant": false,
+"legendFormat": "__auto",
+"range": true,
+"refId": "A"
+}
+],
+"title": "Repos seen",
+"type": "stat"
+},
+{
+"datasource": {
+"type": "prometheus",
+"uid": "PBFA97CFB590B2093"
+},
+"fieldConfig": {
+"defaults": {
+"color": {
+"mode": "thresholds"
+},
+"mappings": [],
+"min": 0,
+"thresholds": {
+"mode": "absolute",
+"steps": [
+{
+"color": "green",
+"value": null
+}
+]
+},
+"unit": "none",
+"unitScale": true
+},
+"overrides": []
+},
+"gridPos": {
+"h": 4,
+"w": 4,
+"x": 10,
+"y": 0
+},
+"id": 11,
+"options": {
+"colorMode": "value",
+"graphMode": "area",
+"justifyMode": "auto",
+"orientation": "auto",
+"reduceOptions": {
+"calcs": [
+"lastNotNull"
+],
+"fields": "",
+"values": false
+},
+"showPercentChange": false,
+"textMode": "auto",
+"wideLayout": true
+},
+"pluginVersion": "10.3.3",
+"targets": [
+{
+"datasource": {
+"type": "prometheus",
+"uid": "PBFA97CFB590B2093"
+},
+"editorMode": "code",
+"expr": "repos_fully_indexed{job=\"db\"}",
+"instant": false,
+"legendFormat": "__auto",
+"range": true,
+"refId": "A"
+}
+],
+"title": "Repos fully indexed",
+"type": "stat"
+},
{
"datasource": {
"type": "prometheus",
@ -201,8 +335,8 @@
},
"gridPos": {
"h": 4,
-"w": 7,
+"w": 10,
-"x": 17,
+"x": 14,
"y": 0
},
"id": 7,
@ -1211,13 +1345,13 @@
]
},
"time": {
-"from": "now-3h",
+"from": "now-30m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Bluesky indexer",
"uid": "aXw6dQhSz",
-"version": 15,
+"version": 17,
"weekStart": ""
}
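The two new stat panels read `repos_seen` and `repos_fully_indexed` from the `db` scrape job. Before expecting data in Grafana, you can confirm Prometheus already sees one of those series (a sketch; assumes Prometheus is reachable on localhost:9090):

```
# Query the Prometheus HTTP API for one of the new series.
curl -s 'http://localhost:9090/api/v1/query?query=repos_seen'
```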
@ -0,0 +1,92 @@
# Install Node-exporter

You'll need to install Node Exporter for monitoring.

1. Download Node Exporter

As a first step, download the Node Exporter binary, which is available for Linux on the official Prometheus website. There you will find a table with the available builds; the one we need is the node_exporter build for Linux AMD64:

Node Exporter Ubuntu Linux

At the time of writing, the latest available version is 1.7.0. Copy the .tar.gz URL and download it somewhere on your server using wget or cURL:

`wget https://github.com/prometheus/node_exporter/releases/download/v1.7.0/node_exporter-1.7.0.linux-amd64.tar.gz`

2. Extract Node Exporter and move the binary

After downloading the latest version of Node Exporter, extract the contents of the downloaded tar with the following command:

`tar xvf node_exporter-1.7.0.linux-amd64.tar.gz`

The archive is extracted into the current directory; the extracted directory contains three files:

LICENSE (license text file)

node_exporter (binary)

NOTICE (license text file)

You only need to move the node_exporter binary to the /usr/local/bin directory of your system. Switch to the extracted directory:

`cd node_exporter-1.7.0.linux-amd64`

Then copy the binary with the following command:

`sudo cp node_exporter /usr/local/bin`

Then you can remove the extracted directory:

# Exit current directory

`cd ..`

# Remove the extracted directory

`rm -rf ./node_exporter-1.7.0.linux-amd64`

3. Create a Node Exporter user

As good practice, create a dedicated system user for Node Exporter:

`sudo useradd --no-create-home --shell /bin/false node_exporter`

And set the owner of the node_exporter binary to the newly created user:

`sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter`

4. Create and start the Node Exporter service

The Node Exporter service should always start when the server boots so it is always available to be scraped. Create the node_exporter.service file with nano:

`sudo nano /etc/systemd/system/node_exporter.service`

And paste the following content into the file:

```
[Unit]
Description=Node Exporter
Wants=network-online.target
After=network-online.target

[Service]
User=node_exporter
Group=node_exporter
Type=simple
ExecStart=/usr/local/bin/node_exporter
Restart=always
RestartSec=3

[Install]
WantedBy=multi-user.target
```

Close nano and save the changes to the file. Then reload the daemon:

`sudo systemctl daemon-reload`

Enable the node_exporter service:

`sudo systemctl enable node_exporter`

Then start the service:

`sudo systemctl start node_exporter`

`sudo ufw allow 9090`
`sudo ufw allow 9100`

Now go to `http://localhost:9100/metrics`

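At this point you can confirm the service is healthy and that the metrics endpoint responds (a sketch; plain systemd and curl, nothing beyond what was installed above):

```
systemctl status node_exporter --no-pager
curl -s http://localhost:9100/metrics | head
```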
# Install query exporter

`sudo snap install query-exporter`

Run the query exporter:

`cd exporters`

`query-exporter config.yaml -H 0.0.0.0`

`sudo ufw allow 9560`
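To check that the exporter is up and serving the custom metrics defined in config.yaml (a sketch; 9560 is query-exporter's default port, and it is the port the Prometheus `db` job scrapes below):

```
# Both repos_seen and repos_fully_indexed should appear once the first queries have run.
curl -s http://localhost:9560/metrics | grep -E 'repos_(seen|fully_indexed)'
```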
@ -0,0 +1,23 @@
databases:
  db1:
    dsn: postgres://postgres:postgres@host.docker.internal:15432/bluesky?sslmode=disable

metrics:
  repos_fully_indexed:
    type: gauge
    description: Repositories fully indexed
  repos_seen:
    type: gauge
    description: Repositories seen

queries:
  query1:
    interval: 30
    databases: [db1]
    metrics: [repos_fully_indexed]
    sql: select count(*) as repos_fully_indexed from repos where last_indexed_rev <> '' and (last_indexed_rev >= first_rev_since_reset or first_rev_since_reset is null or first_rev_since_reset = '');
  query2:
    interval: 30
    databases: [db1]
    metrics: [repos_seen]
    sql: select count(*) as repos_seen from repos;
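The SQL above can be tried by hand against the published Postgres port before relying on the exporter (a sketch; uses the same DSN credentials as config.yaml and assumes psql is installed on the host):

```
PGPASSWORD=postgres psql -h 127.0.0.1 -p 15432 -U postgres -d bluesky \
  -c "select count(*) as repos_seen from repos;"
```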
@ -10,4 +10,7 @@ scrape_configs:
      - targets: ['host.docker.internal:9100']
  - job_name: indexer
    static_configs:
      - targets: [ host.docker.internal:11001, host.docker.internal:11002, host.docker.internal:11003 ]
+  - job_name: db
+    static_configs:
+      - targets: ['host.docker.internal:9560']
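Once Prometheus has reloaded this file, the `db` job should show up among the active scrape targets (a sketch; assumes Prometheus is reachable on localhost:9090 and returns its usual compact JSON):

```
curl -s 'http://localhost:9090/api/v1/targets?state=active' | grep -o '"job":"db"'
```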