Merge pull request #37 from derpeter/main

Add native support for InfluxDB 2.0 / dashboard for InfluxDB 2.0
This commit is contained in:
sparky8512 2021-11-26 14:36:24 -08:00 committed by GitHub
commit 5b6521a51c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 1040 additions and 4 deletions

README.md

@@ -20,10 +20,6 @@ pip install --upgrade -r requirements.txt
If you really care about the details here or wish to minimize your package requirements, you can find more detail about which specific modules are required for what usage in [this Wiki article](https://github.com/sparky8512/starlink-grpc-tools/wiki/Python-Module-Dependencies).
### InfluxDB 2.x
The script that records data to InfluxDB uses the (slightly) older version of the InfluxDB client Python module, not the InfluxDB 2.x client. It can still be made to work with an InfluxDB 2.0 server (and probably later 2.x versions), but doing so requires using `influx v1` [CLI commands](https://docs.influxdata.com/influxdb/v2.0/reference/cli/influx/v1/) on the server to map the 1.x username, password, and database names to their 2.x equivalents.
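For reference, such a mapping can be created with `influx v1` commands along these lines (a sketch only; the bucket ID, username, and password are placeholders):
```shell
# Map a 1.x database/retention-policy name onto an existing 2.x bucket
influx v1 dbrp create --db starlinkstats --rp autogen --bucket-id <bucket-id> --default
# Create 1.x-style credentials that the older client module can use
influx v1 auth create --username starlink --password <password> \
  --read-bucket <bucket-id> --write-bucket <bucket-id>
```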
### Generating the gRPC protocol modules (for non-Docker usage)
This step is no longer required, nor is it particularly recommended, so the details have been moved to [this Wiki article](https://github.com/sparky8512/starlink-grpc-tools/wiki/gRPC-Protocol-Modules).
@@ -151,6 +147,28 @@ The `-t` option to `docker run` will prevent Python from buffering the script's
If there is some problem with accessing the image from the GitHub Packages repository, there is also an image available on Docker Hub, which can be accessed as `neurocis/starlink-grpc-tools`, but note that that image may not be as up to date with changes as the supported one.
## Running with SystemD
To run, for example, the `dish_grpc_influx2` script via systemd, the following steps are one option. The commands should work on Debian/Ubuntu-based distributions:
```shell
sudo apt install python3-venv
cd /opt/
sudo mkdir starlink-grpc-tools
sudo chown <your non-root user> starlink-grpc-tools
git clone <git url>
cd starlink-grpc-tools
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
sudo cp systemd/starlink-influx2.service /etc/systemd/system/starlink-influx2.service
sudo <your favorite editor> /etc/systemd/system/starlink-influx2.service
# Set the InfluxDB URL, token, bucket and org
sudo systemctl daemon-reload
sudo systemctl enable starlink-influx2
sudo systemctl start starlink-influx2
```
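Once the service is running, standard systemd tooling (not specific to this project) can be used to check on it:
```shell
# Confirm the unit is active and view its most recent log lines
sudo systemctl status starlink-influx2
# Follow the script's output live (Ctrl-C to stop)
sudo journalctl -u starlink-influx2 -f
```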
## Dashboards
Several users have built dashboards for displaying data collected by the scripts in this project. Information on those can be found in [this Wiki article](https://github.com/sparky8512/starlink-grpc-tools/wiki/Dashboards). If you have one you would like to add, please feel free to edit the Wiki page to do so.

Grafana dashboard for InfluxDB 2.0 (new file)

@@ -0,0 +1,675 @@
{
"__inputs": [
{
"name": "DS_INFLUXDB",
"label": "InfluxDB",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
"pluginName": "InfluxDB"
},
{
"name": "VAR_TBL_STATS",
"label": "influx",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
"pluginName": "InfluxDB"
},
{
"name": "VAR_DS_INFLUXDB",
"type": "constant",
"label": "InfluxDB DataSource",
"value": "InfluxDB-starlinkstats",
"description": ""
},
{
"name": "VAR_TBL_STATS",
"type": "constant",
"label": "Table name for Statistics",
"value": "spacex.starlink.user_terminal.status",
"description": ""
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.2.5"
},
{
"type": "datasource",
"id": "influxdb",
"name": "InfluxDB",
"version": "1.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1637920561166,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": "${DS_INFLUXDB}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "binbps"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/(uplink)/m"
},
"properties": [
{
"id": "displayName",
"value": "Uplink"
}
]
},
{
"matcher": {
"id": "byName",
"options": "downlink_throughput_bps"
},
"properties": [
{
"id": "displayName",
"value": "Downlink"
}
]
},
{
"matcher": {
"id": "byName",
"options": "uplink_throughput_bps"
},
"properties": [
{
"id": "displayName",
"value": "Uplink"
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 0
},
"id": 4,
"options": {
"legend": {
"calcs": [
"mean",
"max",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.2.5",
"targets": [
{
"hide": false,
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"downlink_throughput_bps\" or r[\"_field\"] == \"uplink_throughput_bps\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"last\")",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Actual Throughput",
"type": "timeseries"
},
{
"datasource": "${DS_INFLUXDB}",
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "fraction_obstructed"
},
"properties": [
{
"id": "displayName",
"value": "Fraction Obstruction"
},
{
"id": "unit",
"value": "%"
}
]
},
{
"matcher": {
"id": "byName",
"options": "pop_ping_drop_rate"
},
"properties": [
{
"id": "displayName",
"value": "Pop Ping Drop Rate"
},
{
"id": "unit",
"value": "%"
}
]
},
{
"matcher": {
"id": "byName",
"options": "pop_ping_latency_ms"
},
"properties": [
{
"id": "displayName",
"value": "Pop Ping Latency Rate"
},
{
"id": "unit",
"value": "ms"
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"displayMode": "table",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.2.5",
"targets": [
{
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"pop_ping_latency_ms\" or r[\"_field\"] == \"pop_ping_drop_rate\" or r[\"_field\"] == \"fraction_obstructed\" or r[\"_field\"] == \"snr\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"last\")",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Ping Latency, Drop Rate, Percent Obstructed & SNR",
"type": "timeseries"
},
{
"cacheTimeout": null,
"datasource": "${DS_INFLUXDB}",
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": null,
"displayMode": "auto",
"filterable": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "alerts"
},
"properties": [
{
"id": "displayName",
"value": "Alerts"
},
{
"id": "custom.width",
"value": 100
},
{
"id": "custom.align",
"value": "left"
}
]
},
{
"matcher": {
"id": "byName",
"options": "currently_obstructed"
},
"properties": [
{
"id": "displayName",
"value": "Currently Obstructed"
},
{
"id": "custom.width",
"value": 200
}
]
},
{
"matcher": {
"id": "byName",
"options": "hardware_version"
},
"properties": [
{
"id": "displayName",
"value": "Hardware Revision"
},
{
"id": "custom.width",
"value": 200
}
]
},
{
"matcher": {
"id": "byName",
"options": "software_version"
},
"properties": [
{
"id": "displayName",
"value": "Software Revision"
},
{
"id": "custom.width",
"value": 400
}
]
},
{
"matcher": {
"id": "byName",
"options": "state"
},
"properties": [
{
"id": "displayName",
"value": "State"
},
{
"id": "custom.width",
"value": 100
}
]
},
{
"matcher": {
"id": "byName",
"options": "alert_motors_stuck"
},
"properties": [
{
"id": "displayName",
"value": "Motor Stuck"
},
{
"id": "custom.width",
"value": 100
}
]
},
{
"matcher": {
"id": "byName",
"options": "alert_unexpected_location"
},
"properties": [
{
"id": "displayName",
"value": "Unexpected Location"
},
{
"id": "custom.width",
"value": 150
}
]
},
{
"matcher": {
"id": "byName",
"options": "alert_thermal_shutdown"
},
"properties": [
{
"id": "displayName",
"value": "Thermal Shutdown"
},
{
"id": "custom.width",
"value": 140
}
]
},
{
"matcher": {
"id": "byName",
"options": "alert_thermal_throttle"
},
"properties": [
{
"id": "displayName",
"value": "Thermal Throttle"
},
{
"id": "custom.width",
"value": 130
}
]
},
{
"matcher": {
"id": "byName",
"options": "uptime"
},
"properties": [
{
"id": "displayName",
"value": "Uptime"
},
{
"id": "custom.align",
"value": "left"
},
{
"id": "unit",
"value": "s"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Time"
},
"properties": [
{
"id": "custom.width",
"value": 150
}
]
}
]
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 11
},
"id": 6,
"interval": null,
"links": [],
"options": {
"frameIndex": 0,
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "Time (last)"
}
]
},
"pluginVersion": "8.2.5",
"targets": [
{
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"hardware_version\" or r[\"_field\"] == \"state\" or r[\"_field\"] == \"software_version\" or r[\"_field\"] == \"alerts\" or r[\"_field\"] == \"currently_obstructed\" or r[\"_field\"] == \"alert_unexpected_location\" or r[\"_field\"] == \"alert_thermal_throttle\" or r[\"_field\"] == \"alert_thermal_shutdown\" or r[\"_field\"] == \"alert_motors_stuck\" or r[\"_field\"] == \"uptime\" )\n |> yield(name: \"last\")",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Alerts & Versions",
"transformations": [
{
"id": "seriesToColumns",
"options": {
"byField": "Time"
}
}
],
"type": "table"
}
],
"refresh": false,
"schemaVersion": 32,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"description": null,
"error": null,
"hide": 2,
"label": "InfluxDB DataSource",
"name": "DS_INFLUXDB",
"query": "${VAR_DS_INFLUXDB}",
"skipUrlSync": false,
"type": "constant",
"current": {
"value": "${VAR_DS_INFLUXDB}",
"text": "${VAR_DS_INFLUXDB}",
"selected": false
},
"options": [
{
"value": "${VAR_DS_INFLUXDB}",
"text": "${VAR_DS_INFLUXDB}",
"selected": false
}
]
},
{
"description": null,
"error": null,
"hide": 2,
"label": "Table name for Statistics",
"name": "TBL_STATS",
"query": "${VAR_TBL_STATS}",
"skipUrlSync": false,
"type": "constant",
"current": {
"value": "${VAR_TBL_STATS}",
"text": "${VAR_TBL_STATS}",
"selected": false
},
"options": [
{
"value": "${VAR_TBL_STATS}",
"text": "${VAR_TBL_STATS}",
"selected": false
}
]
}
]
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Starlink Statistics",
"uid": "ymkHwLaMz",
"version": 12
}

dish_grpc_influx2.py (new file)

@@ -0,0 +1,330 @@
#!/usr/bin/python3
"""Write Starlink user terminal data to an InfluxDB 2.x database.

This script pulls the current status info and/or metrics computed from the
history data and writes them to the specified InfluxDB 2.x database either once
or in a periodic loop.

Data will be written into the requested database with the following
measurement / series names:

: spacex.starlink.user_terminal.status : Current status data
: spacex.starlink.user_terminal.history : Bulk history data
: spacex.starlink.user_terminal.ping_stats : Ping history statistics
: spacex.starlink.user_terminal.usage : Usage history statistics

NOTE: The Starlink user terminal does not include time values with its
history or status data, so this script uses current system time to compute
the timestamps it sends to InfluxDB. It is recommended to run this script on
a host that has its system clock synced via NTP. Otherwise, the timestamps
may get out of sync with real time.
"""

from datetime import datetime
from datetime import timezone
import logging
import os
import signal
import sys
import time
import warnings

from influxdb_client import InfluxDBClient, WriteOptions, WritePrecision

import dish_common

HOST_URL = "localhost:8086"
BUCKET_DEFAULT = "starlinkstats"
BULK_MEASUREMENT = "spacex.starlink.user_terminal.history"
FLUSH_LIMIT = 6
MAX_BATCH = 5000
MAX_QUEUE_LENGTH = 864000


class Terminated(Exception):
    pass


def handle_sigterm(signum, frame):
    # Turn SIGTERM into an exception so main loop can clean up
    raise Terminated


def parse_args():
    parser = dish_common.create_arg_parser(output_description="write it to an InfluxDB 2.0 database")

    group = parser.add_argument_group(title="InfluxDB 2.0 database options")
    group.add_argument("-u",
                       "--url",
                       default=HOST_URL,
                       dest="url",
                       help="URL or IP and port of the InfluxDB 2.0 host, default: " + HOST_URL)
    group.add_argument("-T", "--token", help="Token to access the bucket")
    group.add_argument("-B",
                       "--bucket",
                       default=BUCKET_DEFAULT,
                       help="Bucket name to use, default: " + BUCKET_DEFAULT)
    group.add_argument("-O", "--org", help="Organisation name")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in bulk mode")
    group.add_argument("-C",
                       "--ca-cert",
                       dest="verify_ssl",
                       help="Enable SSL/TLS using specified CA cert to verify broker",
                       metavar="FILENAME")
    group.add_argument("-I",
                       "--insecure",
                       action="store_false",
                       dest="verify_ssl",
                       help="Enable SSL/TLS but disable certificate verification (INSECURE!)")
    group.add_argument("-S",
                       "--secure",
                       action="store_true",
                       dest="verify_ssl",
                       help="Enable SSL/TLS using default CA cert")

    env_map = (
        ("INFLUXDB_URL", "url"),
        ("INFLUXDB_TOKEN", "token"),
        ("INFLUXDB_Bucket", "bucket"),
        ("INFLUXDB_ORG", "org"),
        ("INFLUXDB_SSL", "verify_ssl"),
    )
    env_defaults = {}
    for var, opt in env_map:
        # check both set and not empty string
        val = os.environ.get(var)
        if val:
            if var == "INFLUXDB_SSL" and val == "secure":
                env_defaults[opt] = True
            elif var == "INFLUXDB_SSL" and val == "insecure":
                env_defaults[opt] = False
            else:
                env_defaults[opt] = val
    parser.set_defaults(**env_defaults)

    opts = dish_common.run_arg_parser(parser, need_id=True)

    opts.icargs = {}
    for key in ["url", "token", "bucket", "org", "verify_ssl"]:
        val = getattr(opts, key)
        if val is not None:
            opts.icargs[key] = val
    if opts.verify_ssl is not None:
        opts.icargs["ssl"] = True

    return opts


def flush_points(opts, gstate):
    try:
        write_api = gstate.influx_client.write_api(write_options=WriteOptions(batch_size=len(gstate.points),
                                                                              flush_interval=10_000,
                                                                              jitter_interval=2_000,
                                                                              retry_interval=5_000,
                                                                              max_retries=5,
                                                                              max_retry_delay=30_000,
                                                                              exponential_base=2))
        # Drain the queue in MAX_BATCH-sized chunks so no single write call
        # gets too large, then write whatever remains.
        while len(gstate.points) > MAX_BATCH:
            write_api.write(record=gstate.points[:MAX_BATCH],
                            write_precision=WritePrecision.S,
                            bucket=opts.bucket)
            if opts.verbose:
                print("Data points written: " + str(MAX_BATCH))
            del gstate.points[:MAX_BATCH]
        if gstate.points:
            write_api.write(record=gstate.points,
                            write_precision=WritePrecision.S,
                            bucket=opts.bucket)
            if opts.verbose:
                print("Data points written: " + str(len(gstate.points)))
            gstate.points.clear()
        write_api.flush()
        write_api.close()
    except Exception as e:
        dish_common.conn_error(opts, "Failed writing to InfluxDB database: %s", str(e))
        # If failures persist, don't just use infinite memory. Max queue
        # is currently 10 days of bulk data, so something is very wrong
        # if it's ever exceeded.
        if len(gstate.points) > MAX_QUEUE_LENGTH:
            logging.error("Max write queue exceeded, discarding data.")
            del gstate.points[:-MAX_QUEUE_LENGTH]
        return 1

    return 0


def query_counter(opts, gstate, start, end):
    try:
        query_api = gstate.influx_client.query_api()
        print(str(opts.bucket) + ' ' + str(start) + " " + str(end))
        result = query_api.query('''
            from(bucket: "{0}")
                |> range(start: {1}, stop: {2})
                |> filter(fn: (r) => r["_measurement"] == "{3}")
                |> filter(fn: (r) => r["_field"] == "counter")
                |> last()
                |> yield(name: "last")
            '''.format(opts.bucket, str(start), str(end), BULK_MEASUREMENT))
        if result:
            counter = result[0].records[0]['_value']
            timestamp = result[0].records[0]['_time']
            timestamp = time.mktime(timestamp.timetuple())
            if counter and timestamp:
                return int(counter), int(timestamp)
    except TypeError as e:
        logging.error("Skipping resumption from prior counter value. Reported error was: %s",
                      str(e))

    return None, 0


def sync_timebase(opts, gstate):
    try:
        db_counter, db_timestamp = query_counter(opts, gstate, gstate.start_timestamp,
                                                 gstate.timestamp)
    except Exception as e:
        # could be temporary outage, so try again next time
        dish_common.conn_error(opts, "Failed querying InfluxDB for prior count: %s", str(e))
        return
    gstate.timebase_synced = True

    if db_counter and gstate.start_counter <= db_counter:
        del gstate.deferred_points[:db_counter - gstate.start_counter]
        if gstate.deferred_points:
            delta_timestamp = db_timestamp - (gstate.deferred_points[0]["time"] - 1)
            # to prevent +/- 1 second timestamp drift when the script restarts,
            # if time base is within 2 seconds of that of the last sample in
            # the database, correct back to that time base
            if delta_timestamp == 0:
                if opts.verbose:
                    print("Exactly synced with database time base")
            elif -2 <= delta_timestamp <= 2:
                if opts.verbose:
                    print("Replacing with existing time base: {0} -> {1}".format(
                        db_counter, datetime.fromtimestamp(db_timestamp, tz=timezone.utc)))
                for point in gstate.deferred_points:
                    db_timestamp += 1
                    if point["time"] + delta_timestamp == db_timestamp:
                        point["time"] = db_timestamp
                    else:
                        # lost time sync when recording data, leave the rest
                        break
                else:
                    gstate.timestamp = db_timestamp
            else:
                if opts.verbose:
                    print("Database time base out of sync by {0} seconds".format(delta_timestamp))

    gstate.points.extend(gstate.deferred_points)
    gstate.deferred_points.clear()


def loop_body(opts, gstate):
    fields = {"status": {}, "ping_stats": {}, "usage": {}}

    def cb_add_item(key, val, category):
        fields[category][key] = val

    def cb_add_sequence(key, val, category, start):
        for i, subval in enumerate(val, start=start):
            fields[category]["{0}_{1}".format(key, i)] = subval

    def cb_add_bulk(bulk, count, timestamp, counter):
        if gstate.start_timestamp is None:
            gstate.start_timestamp = timestamp
            gstate.start_counter = counter
        points = gstate.points if gstate.timebase_synced else gstate.deferred_points
        for i in range(count):
            timestamp += 1
            points.append({
                "measurement": BULK_MEASUREMENT,
                "tags": {
                    "id": gstate.dish_id
                },
                "time": timestamp,
                "fields": {key: val[i] for key, val in bulk.items() if val[i] is not None},
            })
        if points:
            # save off counter value for script restart
            points[-1]["fields"]["counter"] = counter + count

    now = time.time()
    # work with UTC here
    # now = time.mktime(datetime.utcnow().timetuple())

    rc = dish_common.get_data(opts, gstate, cb_add_item, cb_add_sequence, add_bulk=cb_add_bulk)
    if rc:
        return rc

    for category in fields:
        if fields[category]:
            gstate.points.append({
                "measurement": "spacex.starlink.user_terminal." + category,
                "tags": {
                    "id": gstate.dish_id
                },
                "time": int(now),
                "fields": fields[category],
            })

    # This is here and not before the points being processed because if the
    # query previously failed, there will be points that were processed in
    # a prior loop. This avoids having to handle that as a special case.
    if opts.bulk_mode and not gstate.timebase_synced:
        sync_timebase(opts, gstate)

    if opts.verbose:
        print("Data points queued: " + str(len(gstate.points)))

    if len(gstate.points) >= FLUSH_LIMIT:
        return flush_points(opts, gstate)

    return 0


def main():
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)
    gstate.points = []
    gstate.deferred_points = []
    gstate.timebase_synced = opts.skip_query
    gstate.start_timestamp = None
    gstate.start_counter = None

    if "verify_ssl" in opts.icargs and not opts.icargs["verify_ssl"]:
        # user has explicitly said be insecure, so don't warn about it
        warnings.filterwarnings("ignore", message="Unverified HTTPS request")

    signal.signal(signal.SIGTERM, handle_sigterm)

    try:
        gstate.influx_client = InfluxDBClient(**opts.icargs)
    except TypeError as _err:
        print('Error while creating influx client: ' + str(_err))

    rc = 0
    try:
        next_loop = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval > 0.0:
                now = time.monotonic()
                next_loop = max(next_loop + opts.loop_interval, now)
                time.sleep(next_loop - now)
            else:
                break
    except Terminated:
        pass
    finally:
        if gstate.points:
            rc = flush_points(opts, gstate)
        gstate.influx_client.close()
        gstate.shutdown()

    sys.exit(rc)


if __name__ == '__main__':
    main()

requirements.txt

@@ -4,4 +4,5 @@ protobuf>=3.6.0
yagrc>=1.1.1
paho-mqtt>=1.5.1
influxdb>=5.3.1
influxdb_client>=1.23.0
pypng>=0.0.20

systemd/starlink-influx2.service (new file)

@@ -0,0 +1,12 @@
[Unit]
Description=Starlink GRPC to InfluxDB 2.x exporter
After=network.target

[Service]
Type=simple
WorkingDirectory=/opt/starlink-grpc-tools/
Environment=INFLUXDB_URL=http://localhost:8086 INFLUXDB_TOKEN=<changeme> INFLUXDB_Bucket=<changeme> INFLUXDB_ORG=<changeme> INFLUXDB_SSL=false
ExecStart=/opt/starlink-grpc-tools/venv/bin/python3 dish_grpc_influx2.py -t 10 status alert_detail

[Install]
WantedBy=multi-user.target