Add the non-abs wedge_fraction_obstructed status
I had only added the one that the Starlink app uses to show obstructions, because the other one didn't seem all that useful, but people seem to be interested in studying the difference between the two, so it might as well be available. It goes in the obstruction_detail group, along with the existing one. I'm kinda regretting the name I picked for the first one, though, because it's now a little confusing between my naming and the naming in the grpc message. Since this is a new field, it also meant implementing schema updates for the sqlite script.
parent ec61333710
commit 67b0045ac8
2 changed files with 59 additions and 14 deletions
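For anyone who wants to eyeball the difference the commit message talks about, here is a quick sketch (not part of this commit): it assumes the repo's starlink_grpc module is importable and a dish is reachable, and it uses the group layout and key names added by this change.

import starlink_grpc

# status_data() returns three groups: general status, obstruction detail, and alerts.
status, obstruction_detail, alerts = starlink_grpc.status_data()
app_wedges = obstruction_detail["wedges_fraction_obstructed[]"]
raw_wedges = obstruction_detail["raw_wedges_fraction_obstructed[]"]
for wedge, (app_val, raw_val) in enumerate(zip(app_wedges, raw_wedges)):
    print("wedge {0}: app={1} raw={2}".format(wedge, app_val, raw_val))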
@@ -31,7 +31,7 @@ import time
 import dish_common
 import starlink_grpc

-SCHEMA_VERSION = 1
+SCHEMA_VERSION = 2


 class Terminated(Exception):
@@ -49,6 +49,11 @@ def parse_args():
     parser.add_argument("database", help="Database file to use")

     group = parser.add_argument_group(title="sqlite database options")
+    group.add_argument("-f",
+                       "--force",
+                       action="store_true",
+                       help="Override database schema downgrade protection; may result in "
+                       "discarded data")
     group.add_argument("-k",
                        "--skip-query",
                        action="store_true",
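A standalone sketch of how the new option behaves once parsed (plain argparse, mirroring the lines above; the sample command line is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("database", help="Database file to use")
group = parser.add_argument_group(title="sqlite database options")
group.add_argument("-f",
                   "--force",
                   action="store_true",
                   help="Override database schema downgrade protection; may result in "
                   "discarded data")
opts = parser.parse_args(["starlink.db", "--force"])
print(opts.database, opts.force)  # -> starlink.db True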
@@ -151,16 +156,23 @@ def ensure_schema(opts, conn):
         cur.close()
         return

-    if opts.verbose:
-        if version[0]:
-            print("Upgrading schema from version:", version)
-        else:
+    if not version or not version[0]:
+        if opts.verbose:
             print("Initializing new database")
+        create_tables(conn, "")
+    elif version[0] > SCHEMA_VERSION and not opts.force:
+        logging.error("Cowardly refusing to downgrade from schema version %s", version[0])
+        raise Terminated
+    else:
+        print("Converting from schema version:", version[0])
+        convert_tables(conn)

-    # If/when more fields get added or changed, the schema will have changed
-    # and this will have to handle the upgrade case. For now, just create the
-    # new tables.
+    cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
+    cur.close()
+    conn.commit()
+

+def create_tables(conn, suffix):
     tables = {}
     name_groups = starlink_grpc.status_field_names()
     type_groups = starlink_grpc.status_field_types()
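The version gate above is just SQLite's user_version pragma, which reads back as 0 on a brand new database, so the "not version[0]" branch doubles as first-time initialization. A standalone sketch of the same pattern, using an in-memory database and a placeholder table rather than the script's real schema:

import sqlite3

SCHEMA_VERSION = 2

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
version = cur.execute("PRAGMA user_version").fetchone()[0]
if version == SCHEMA_VERSION:
    pass  # already current, nothing to do
elif not version:
    # 0 means a fresh database: create the tables from scratch
    cur.execute('CREATE TABLE "status" ("time" INTEGER NOT NULL, "id" TEXT NOT NULL)')
elif version > SCHEMA_VERSION:
    raise RuntimeError("refusing to downgrade from schema version {0}".format(version))
else:
    pass  # older schema: convert existing tables here
cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
conn.commit()
cur.close()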
@@ -187,20 +199,45 @@ def ensure_schema(opts, conn):
             return "TEXT"
         raise TypeError

+    column_info = {}
+    cur = conn.cursor()
     for table, group_pairs in tables.items():
+        column_names = ["time", "id"]
         columns = ['"time" INTEGER NOT NULL', '"id" TEXT NOT NULL']
         for name_group, type_group in group_pairs:
             for name_item, type_item in zip(name_group, type_group):
                 name_item = dish_common.BRACKETS_RE.match(name_item).group(1)
                 if name_item != "id":
                     columns.append('"{0}" {1}'.format(name_item, sql_type(type_item)))
-        sql = 'CREATE TABLE "{0}" ({1}, PRIMARY KEY("time","id"))'.format(table, ", ".join(columns))
+                    column_names.append(name_item)
+        cur.execute('DROP TABLE IF EXISTS "{0}{1}"'.format(table, suffix))
+        sql = 'CREATE TABLE "{0}{1}" ({2}, PRIMARY KEY("time","id"))'.format(
+            table, suffix, ", ".join(columns))
         cur.execute(sql)
-    cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
+        column_info[table] = column_names
     cur.close()
-    conn.commit()
+
+    return column_info
+
+
+def convert_tables(conn):
+    new_column_info = create_tables(conn, "_new")
+    conn.row_factory = sqlite3.Row
+    old_cur = conn.cursor()
+    new_cur = conn.cursor()
+    for table, new_columns in new_column_info.items():
+        old_cur.execute('SELECT * FROM "{0}"'.format(table))
+        old_columns = set(x[0] for x in old_cur.description)
+        new_columns = tuple(x for x in new_columns if x in old_columns)
+        sql = 'INSERT OR REPLACE INTO "{0}_new" ({1}) VALUES ({2})'.format(
+            table, ",".join('"' + x + '"' for x in new_columns),
+            ",".join(repeat("?", len(new_columns))))
+        new_cur.executemany(sql, (tuple(row[col] for col in new_columns) for row in old_cur))
+        new_cur.execute('DROP TABLE "{0}"'.format(table))
+        new_cur.execute('ALTER TABLE {0}_new RENAME TO {0}'.format(table))
+    old_cur.close()
+    new_cur.close()
+    conn.row_factory = None


 def main():
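convert_tables leans on a copy-and-swap: create_tables builds the new tables under a "_new" suffix, whatever columns both schemas share get copied across with INSERT OR REPLACE, and then the old table is dropped and the new one renamed into place. A self-contained sketch of that pattern on a toy table (the table and column names here are placeholders, not the script's actual schema):

import sqlite3
from itertools import repeat

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE "status" ("time" INTEGER NOT NULL, "id" TEXT NOT NULL, "state" TEXT);
    INSERT INTO "status" VALUES (1, 'dish1', 'CONNECTED');
""")

# The new schema keeps "time", "id", "state" and adds one more column.
new_columns = ["time", "id", "state", "new_metric"]
conn.execute('CREATE TABLE "status_new" ({0}, PRIMARY KEY("time","id"))'.format(
    ", ".join('"{0}"'.format(col) for col in new_columns)))

conn.row_factory = sqlite3.Row
old_cur = conn.cursor()
new_cur = conn.cursor()
old_cur.execute('SELECT * FROM "status"')
old_names = set(x[0] for x in old_cur.description)
copy_columns = tuple(x for x in new_columns if x in old_names)
sql = 'INSERT OR REPLACE INTO "status_new" ({0}) VALUES ({1})'.format(
    ",".join('"' + x + '"' for x in copy_columns),
    ",".join(repeat("?", len(copy_columns))))
new_cur.executemany(sql, (tuple(row[col] for col in copy_columns) for row in old_cur))
new_cur.execute('DROP TABLE "status"')
new_cur.execute('ALTER TABLE status_new RENAME TO status')
conn.row_factory = None
conn.commit()
print(conn.execute('SELECT * FROM "status"').fetchall())  # [(1, 'dish1', 'CONNECTED', None)]

Columns that exist only in the source schema simply do not get copied by this approach, which is presumably why the --force downgrade override warns about discarded data.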
@@ -214,10 +251,10 @@ def main():

     signal.signal(signal.SIGTERM, handle_sigterm)
     gstate.sql_conn = sqlite3.connect(opts.database)
-    ensure_schema(opts, gstate.sql_conn)

     rc = 0
     try:
+        ensure_schema(opts, gstate.sql_conn)
         next_loop = time.monotonic()
         while True:
             rc = loop_body(opts, gstate)
@@ -83,6 +83,11 @@ terminal has determined to be obstructed.
     30 degrees East of North, and subsequent wedges rotate 30 degrees further
     in the same direction. (It's not clear if this will hold true at all
     latitudes.)
+: **raw_wedges_fraction_obstructed** : A 12 element sequence. Wedges
+    presumably correlate with the ones in *wedges_fraction_obstructed*, but
+    the exact relationship is unknown. The numbers in this one are generally
+    higher and may represent fraction of the wedge, in which case max value
+    for each element should be 1.
 : **valid_s** : It is unclear what this field means exactly, but it appears to
     be a measure of how complete the data is that the user terminal uses to
     determine obstruction locations.
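The new docstring entry deliberately hedges on how the two sequences relate, and that open question is exactly why the field is being exposed. A trivial sketch of the kind of comparison it invites; the sample numbers below are made up, not real dish output:

# Made-up sample data for the 12 wedges: app-style ("abs") values vs. raw values.
wedges_abs = [0.0, 0.01, 0.02, 0.0, 0.0, 0.03, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
wedges_raw = [0.0, 0.12, 0.25, 0.0, 0.0, 0.40, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

for wedge, (abs_val, raw_val) in enumerate(zip(wedges_abs, wedges_raw)):
    if raw_val > 1.0:
        print("wedge {0}: raw value exceeds 1, so it is not a simple fraction".format(wedge))
    elif raw_val:
        print("wedge {0}: abs={1} raw={2} ratio={3:.3f}".format(wedge, abs_val, raw_val,
                                                                abs_val / raw_val))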
@@ -399,6 +404,7 @@ def status_field_names():
         "seconds_obstructed",
     ], [
         "wedges_fraction_obstructed[12]",
+        "raw_wedges_fraction_obstructed[12]",
         "valid_s",
     ], alert_names
@@ -434,6 +440,7 @@ def status_field_types():
         float,  # seconds_obstructed
     ], [
         float,  # wedges_fraction_obstructed[]
+        float,  # raw_wedges_fraction_obstructed[]
         float,  # valid_s
     ], [bool] * len(dish_pb2.DishAlerts.DESCRIPTOR.fields)
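The names, types, and data groups are kept parallel on purpose; that is what lets the sqlite script build its schema generically by zipping them, as create_tables does above. A hedged sketch of the same walk (assumes the repo's starlink_grpc module and its generated protobuf modules are importable; no dish connection is needed for these two calls):

import starlink_grpc

name_groups = starlink_grpc.status_field_names()
type_groups = starlink_grpc.status_field_types()
for name_group, type_group in zip(name_groups, type_groups):
    for name, type_class in zip(name_group, type_group):
        print(name, type_class.__name__)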
@@ -542,6 +549,7 @@ def status_data(context=None):
         "seconds_obstructed": status.obstruction_stats.last_24h_obstructed_s,
     }, {
         "wedges_fraction_obstructed[]": status.obstruction_stats.wedge_abs_fraction_obstructed,
+        "raw_wedges_fraction_obstructed[]": status.obstruction_stats.wedge_fraction_obstructed,
         "valid_s": status.obstruction_stats.valid_s,
     }, alerts
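After running the updated script against an existing version 1 database, the migration can be spot-checked with nothing but the standard sqlite3 module; the database file name below is an assumption, use whatever path the script was given:

import sqlite3

conn = sqlite3.connect("starlink.db")
cur = conn.cursor()
print("schema version:", cur.execute("PRAGMA user_version").fetchone()[0])
for (name,) in cur.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall():
    columns = [row[1] for row in cur.execute('PRAGMA table_info("{0}")'.format(name))]
    print(name, columns)
cur.close()
conn.close()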