#!/usr/bin/python3
"""Write Starlink user terminal data to a sqlite database.

This script pulls the current status info and/or metrics computed from the
history data and writes them to the specified sqlite database either once or
in a periodic loop.
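
For example, a hypothetical invocation, assuming this file is saved as
dish_grpc_sqlite.py and using the mode arguments and -t loop interval option
supplied by dish_common:

    python3 dish_grpc_sqlite.py starlink.db status -t 60

This would record the status group to starlink.db once per minute.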

Requested data will be written into the following tables:

: status : Current status data
: history : Bulk history data
: ping_stats : Ping history statistics
: usage : Usage history statistics
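
As an example of reading the data back out, the sqlite3 command line tool can
be used directly; a hypothetical session (the database file name is whatever
was passed on the command line, and the columns present depend on which modes
were recorded):

    sqlite3 starlink.db 'SELECT * FROM "usage" ORDER BY "time" DESC LIMIT 1'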

Array data is currently written to the database as text strings of comma-
separated values, which may not be the best method for some use cases. If you
find yourself wishing they were handled better, please open a feature request
at https://github.com/sparky8512/starlink-grpc-tools/issues explaining the use
case and how you would rather see it. This only affects a few fields, since
most of the useful data is not in arrays.
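
As a sketch of one way to turn such a field back into a Python list, assuming
float elements and given that an empty element means a missing sample:

    [float(x) if x else None for x in text.split(",")]

where "text" is the string value read from the database.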

Note that using this script to record the alert_detail group mode will tend to
trip schema-related errors when new alert types are added to the dish
software. The error message will include something like "table status has no
column named alert_foo", where "foo" is the newly added alert type. To work
around this rare occurrence, you can pass the -f option to force a schema
update. Alternatively, instead of using the alert_detail mode, you can use the
alerts bitmask in the status group.

NOTE: The Starlink user terminal does not include time values with its
history or status data, so this script uses current system time to compute
the timestamps it writes into the database. It is recommended to run this
script on a host that has its system clock synced via NTP. Otherwise, the
timestamps may get out of sync with real time.
"""

from datetime import datetime
from datetime import timezone
from itertools import repeat
import logging
import signal
import sqlite3
import sys
import time

import dish_common
import starlink_grpc
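
# Schema version of the sqlite database, stored in its user_version pragma;
# ensure_schema() initializes or converts a database that does not match it.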
SCHEMA_VERSION = 3


class Terminated(Exception):
    pass


def handle_sigterm(signum, frame):
    # Turn SIGTERM into an exception so main loop can clean up
    raise Terminated


def parse_args():
    parser = dish_common.create_arg_parser(output_description="write it to a sqlite database")

    parser.add_argument("database", help="Database file to use")

    group = parser.add_argument_group(title="sqlite database options")
    group.add_argument("-f",
                       "--force",
                       action="store_true",
                       help="Force schema conversion, even if it results in downgrade; may "
                       "result in discarded data")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in bulk mode")

    opts = dish_common.run_arg_parser(parser, need_id=True)

    opts.skip_query |= opts.no_counter

    return opts


def query_counter(opts, gstate, column, table):
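    """Find the time and counter value of the last recorded sample, if any.

    Queries the given table for the newest row recorded for this dish id and
    returns its time along with the requested counter column, or (0, None)
    if no prior row exists, so that collection can resume where a previous
    run of the script left off.
    """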
    now = time.time()
    cur = gstate.sql_conn.cursor()
    cur.execute(
        'SELECT "time", "{0}" FROM "{1}" WHERE "time"<? AND "id"=? '
        'ORDER BY "time" DESC LIMIT 1'.format(column, table), (now, gstate.dish_id))
    row = cur.fetchone()
    cur.close()

    if row and row[0] and row[1]:
        if opts.verbose:
            print("Existing time base: {0} -> {1}".format(
                row[1], datetime.fromtimestamp(row[0], tz=timezone.utc)))
        return row
    else:
        return 0, None


def loop_body(opts, gstate):
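    """Collect one round of data and write it to the database.

    Data is gathered via the dish_common callbacks into per-table dicts,
    then written out under a single commit. Returns 0 on success, non-zero
    on collection or database failure.
    """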
    tables = {"status": {}, "ping_stats": {}, "usage": {}}
    hist_cols = ["time", "id"]
    hist_rows = []

    def cb_add_item(key, val, category):
        tables[category][key] = val

    def cb_add_sequence(key, val, category, start):
        tables[category][key] = ",".join(str(subv) if subv is not None else "" for subv in val)

    def cb_add_bulk(bulk, count, timestamp, counter):
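        # On the first call, fix the history column list based on the fields
        # the dish actually returned, with the sample counter appended last.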
        if len(hist_cols) == 2:
            hist_cols.extend(bulk.keys())
            hist_cols.append("counter")
        for i in range(count):
            timestamp += 1
            counter += 1
            row = [timestamp, gstate.dish_id]
            row.extend(val[i] for val in bulk.values())
            row.append(counter)
            hist_rows.append(row)

    now = int(time.time())
    rc = dish_common.get_status_data(opts, gstate, cb_add_item, cb_add_sequence)

    if opts.history_stats_mode and not rc:
        if gstate.counter_stats is None and not opts.skip_query and opts.samples < 0:
            _, gstate.counter_stats = query_counter(opts, gstate, "end_counter", "ping_stats")
        rc = dish_common.get_history_stats(opts, gstate, cb_add_item, cb_add_sequence)

    if opts.bulk_mode and not rc:
        if gstate.counter is None and not opts.skip_query and opts.bulk_samples < 0:
            gstate.timestamp, gstate.counter = query_counter(opts, gstate, "counter", "history")
        rc = dish_common.get_bulk_data(opts, gstate, cb_add_bulk)

    rows_written = 0

    try:
        cur = gstate.sql_conn.cursor()
        for category, fields in tables.items():
            if fields:
                sql = 'INSERT OR REPLACE INTO "{0}" ("time","id",{1}) VALUES ({2})'.format(
                    category, ",".join('"' + x + '"' for x in fields),
                    ",".join(repeat("?", len(fields) + 2)))
                values = [now, gstate.dish_id]
                values.extend(fields.values())
                cur.execute(sql, values)
                rows_written += 1

        if hist_rows:
            sql = 'INSERT OR REPLACE INTO "history" ({0}) VALUES({1})'.format(
                ",".join('"' + x + '"' for x in hist_cols), ",".join(repeat("?", len(hist_cols))))
            cur.executemany(sql, hist_rows)
            rows_written += len(hist_rows)

        cur.close()
        gstate.sql_conn.commit()
    except sqlite3.OperationalError as e:
        # These are not necessarily fatal, but there is also not much we can
        # do about them, so log the error and discard this round of data.
        logging.error("Unexpected error from database, discarding data: %s", e)
        rc = 1
    else:
        if opts.verbose:
            print("Rows written to db:", rows_written)

    return rc


def ensure_schema(opts, conn, context):
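    """Ensure the database tables match the current schema version.

    The version in use is tracked via the sqlite user_version pragma. A
    brand new database gets fresh tables; a mismatched one is converted,
    except that downgrades are refused unless the force option is set.
    Returns 0 on success, 1 on failure.
    """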
    cur = conn.cursor()
    cur.execute("PRAGMA user_version")
    version = cur.fetchone()
    if version and version[0] == SCHEMA_VERSION and not opts.force:
        cur.close()
        return 0

    try:
        if not version or not version[0]:
            if opts.verbose:
                print("Initializing new database")
            create_tables(conn, context, "")
        elif version[0] > SCHEMA_VERSION and not opts.force:
            logging.error("Cowardly refusing to downgrade from schema version %s", version[0])
            return 1
        else:
            print("Converting from schema version:", version[0])
            convert_tables(conn, context)
        cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
        conn.commit()
        return 0
    except starlink_grpc.GrpcError as e:
        dish_common.conn_error(opts, "Failure reflecting status fields: %s", str(e))
        return 1
    finally:
        cur.close()


def create_tables(conn, context, suffix):
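    """(Re)create the data tables, with names suffixed by the given string.

    Column names and types are reflected from the starlink_grpc field
    metadata, so the schema tracks whatever fields that module exposes.
    Returns a dict mapping each table name to its list of column names.
    """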
    tables = {}
    name_groups = starlink_grpc.status_field_names(context=context)
    type_groups = starlink_grpc.status_field_types(context=context)
    tables["status"] = zip(name_groups, type_groups)

    name_groups = starlink_grpc.history_stats_field_names()
    type_groups = starlink_grpc.history_stats_field_types()
    tables["ping_stats"] = zip(name_groups[0:5], type_groups[0:5])
    tables["usage"] = ((name_groups[5], type_groups[5]),)

    name_groups = starlink_grpc.history_bulk_field_names()
    type_groups = starlink_grpc.history_bulk_field_types()
    tables["history"] = ((name_groups[1], type_groups[1]), (["counter"], [int]))

    def sql_type(type_class):
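        # NOTE: bool is a subclass of int in Python, so it must be checked
        # before int in order to get the BOOLEAN column type.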
        if issubclass(type_class, float):
            return "REAL"
        if issubclass(type_class, bool):
            # advisory only, stores as int:
            return "BOOLEAN"
        if issubclass(type_class, int):
            return "INTEGER"
        if issubclass(type_class, str):
            return "TEXT"
        raise TypeError

    column_info = {}
    cur = conn.cursor()
    for table, group_pairs in tables.items():
        column_names = ["time", "id"]
        columns = ['"time" INTEGER NOT NULL', '"id" TEXT NOT NULL']
        for name_group, type_group in group_pairs:
            for name_item, type_item in zip(name_group, type_group):
                name_item = dish_common.BRACKETS_RE.match(name_item).group(1)
                if name_item != "id":
                    columns.append('"{0}" {1}'.format(name_item, sql_type(type_item)))
                    column_names.append(name_item)
        cur.execute('DROP TABLE IF EXISTS "{0}{1}"'.format(table, suffix))
        sql = 'CREATE TABLE "{0}{1}" ({2}, PRIMARY KEY("time","id"))'.format(
            table, suffix, ", ".join(columns))
        cur.execute(sql)
        column_info[table] = column_names
    cur.close()

    return column_info


def convert_tables(conn, context):
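    """Migrate data from the existing tables into ones matching the schema.

    New tables are created with a "_new" suffix, rows are copied across for
    whichever columns the old and new tables have in common, and the new
    tables are then renamed over the old ones.
    """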
    new_column_info = create_tables(conn, context, "_new")
    conn.row_factory = sqlite3.Row
    old_cur = conn.cursor()
    new_cur = conn.cursor()
    for table, new_columns in new_column_info.items():
        old_cur.execute('SELECT * FROM "{0}"'.format(table))
        old_columns = set(x[0] for x in old_cur.description)
        new_columns = tuple(x for x in new_columns if x in old_columns)
        sql = 'INSERT OR REPLACE INTO "{0}_new" ({1}) VALUES ({2})'.format(
            table, ",".join('"' + x + '"' for x in new_columns),
            ",".join(repeat("?", len(new_columns))))
        new_cur.executemany(sql, (tuple(row[col] for col in new_columns) for row in old_cur))
        new_cur.execute('DROP TABLE "{0}"'.format(table))
        new_cur.execute('ALTER TABLE {0}_new RENAME TO {0}'.format(table))
    old_cur.close()
    new_cur.close()
    conn.row_factory = None


def main():
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)
    gstate.points = []
    gstate.deferred_points = []

    signal.signal(signal.SIGTERM, handle_sigterm)
    gstate.sql_conn = sqlite3.connect(opts.database)

    rc = 0
    try:
        rc = ensure_schema(opts, gstate.sql_conn, gstate.context)
        if rc:
            sys.exit(rc)
        next_loop = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval > 0.0:
now = time.monotonic()
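                # Maintain a fixed cadence: if the loop body overran the
                # interval, skip ahead to now instead of trying to catch up.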
                next_loop = max(next_loop + opts.loop_interval, now)
                time.sleep(next_loop - now)
            else:
                break
    except sqlite3.Error as e:
        logging.error("Database error: %s", e)
        rc = 1
    except Terminated:
        pass
    finally:
        gstate.sql_conn.close()
        gstate.shutdown()

    sys.exit(rc)


if __name__ == '__main__':
    main()